From e7682e1496a492dfb66f543e60fbf962f5103978 Mon Sep 17 00:00:00 2001
From: Alejandro Abdelnur
Date: Fri, 9 Dec 2011 20:21:19 +0000
Subject: [PATCH] HDFS-2511. Add dev script to generate HDFS protobufs. (tucu)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1212609 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |     2 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml       |    24 +
 .../proto/ClientDatanodeProtocolProtos.java   |  4162 --
 .../proto/ClientNamenodeProtocolProtos.java   | 46529 ----------------
 .../protocol/proto/DataTransferProtos.java    | 10690 ----
 .../proto/DatanodeProtocolProtos.java         | 17233 ------
 .../hdfs/protocol/proto/HdfsProtos.java       | 21240 -------
 .../proto/InterDatanodeProtocolProtos.java    |  2517 -
 .../protocol/proto/JournalProtocolProtos.java |  2234 -
 .../proto/NamenodeProtocolProtos.java         |  9015 ---
 .../proto/ClientDatanodeProtocol.proto        |     0
 .../proto/ClientNamenodeProtocol.proto        |     0
 .../{ => main}/proto/DatanodeProtocol.proto   |     0
 .../proto/InterDatanodeProtocol.proto         |     0
 .../{ => main}/proto/JournalProtocol.proto    |     0
 .../{ => main}/proto/NamenodeProtocol.proto   |     0
 .../src/{ => main}/proto/datatransfer.proto   |     0
 .../src/{ => main}/proto/hdfs.proto           |     0
 18 files changed, 26 insertions(+), 113620 deletions(-)
 delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/ClientDatanodeProtocolProtos.java
 delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/ClientNamenodeProtocolProtos.java
 delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/DataTransferProtos.java
 delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/DatanodeProtocolProtos.java
 delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/HdfsProtos.java
 delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/InterDatanodeProtocolProtos.java
 delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/JournalProtocolProtos.java
 delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/NamenodeProtocolProtos.java
 rename hadoop-hdfs-project/hadoop-hdfs/src/{ => main}/proto/ClientDatanodeProtocol.proto (100%)
 rename hadoop-hdfs-project/hadoop-hdfs/src/{ => main}/proto/ClientNamenodeProtocol.proto (100%)
 rename hadoop-hdfs-project/hadoop-hdfs/src/{ => main}/proto/DatanodeProtocol.proto (100%)
 rename hadoop-hdfs-project/hadoop-hdfs/src/{ => main}/proto/InterDatanodeProtocol.proto (100%)
 rename hadoop-hdfs-project/hadoop-hdfs/src/{ => main}/proto/JournalProtocol.proto (100%)
 rename hadoop-hdfs-project/hadoop-hdfs/src/{ => main}/proto/NamenodeProtocol.proto (100%)
 rename hadoop-hdfs-project/hadoop-hdfs/src/{ => main}/proto/datatransfer.proto (100%)
 rename hadoop-hdfs-project/hadoop-hdfs/src/{ => main}/proto/hdfs.proto (100%)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 14c98a63735..85543c12ac8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -93,6 +93,8 @@ Trunk (unreleased changes)
 
     HDFS-2597 ClientNameNodeProtocol in Protocol Buffers (sanjay)
 
+    HDFS-2511. Add dev script to generate HDFS protobufs. (tucu)
+
   OPTIMIZATIONS
 
     HDFS-2477. Optimize computing the diff between a block report and the
     namenode state. (Tomasz Nykiel via hairong)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 1bcc372706e..25eaef2579a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -205,6 +205,7 @@
             <sources>
+              <source>${project.build.directory}/generated-sources/java</source>
               <source>${project.build.directory}/generated-src/main/jsp</source>
@@ -215,6 +216,29 @@
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-antrun-plugin</artifactId>
         <executions>
+          <execution>
+            <id>compile-proto</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <target>
+                <echo file="target/compile-proto.sh">
+                    PROTO_DIR=${basedir}/src/main/proto
+                    ls $PROTO_DIR &amp;> /dev/null
+                    if [ $? = 0 ]; then
+                      JAVA_DIR=${project.build.directory}/generated-sources/java
+                      mkdir -p $JAVA_DIR
+                      ls $PROTO_DIR/*.proto | xargs -n 1 protoc -I$PROTO_DIR --java_out=$JAVA_DIR
+                    fi
+                </echo>
+                <exec executable="sh" dir="${basedir}" failonerror="true">
+                  <arg line="target/compile-proto.sh"/>
+                </exec>
+              </target>
+            </configuration>
+          </execution>
           <execution>
             <id>create-web-xmls</id>
             <phase>compile</phase>
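For readers following what the new compile-proto step does, here is a rough Java equivalent of the shell fragment above, for illustration only: a minimal sketch assuming paths relative to hadoop-hdfs-project/hadoop-hdfs and protoc on the PATH (the build itself runs the shell script, not this class).

```java
import java.io.File;

public class CompileProto {
  public static void main(String[] args) throws Exception {
    File protoDir = new File("src/main/proto");
    File javaDir = new File("target/generated-sources/java");

    // Mirrors the "ls $PROTO_DIR" guard: if there is no proto dir, do nothing.
    File[] protos = protoDir.listFiles((dir, name) -> name.endsWith(".proto"));
    if (protos == null) {
      return;
    }
    javaDir.mkdirs(); // mkdir -p $JAVA_DIR

    // Run protoc once per .proto file, like "xargs -n 1 protoc".
    for (File proto : protos) {
      Process proc = new ProcessBuilder(
              "protoc",
              "-I" + protoDir.getPath(),
              "--java_out=" + javaDir.getPath(),
              proto.getPath())
          .inheritIO()
          .start();
      if (proc.waitFor() != 0) {
        throw new RuntimeException("protoc failed for " + proto);
      }
    }
  }
}
```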
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/ClientDatanodeProtocolProtos.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/ClientDatanodeProtocolProtos.java
deleted file mode 100644
index ceb2be940c8..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/ClientDatanodeProtocolProtos.java
+++ /dev/null
@@ -1,4162 +0,0 @@
-// Generated by the protocol buffer compiler.  DO NOT EDIT!
-// source: ClientDatanodeProtocol.proto
-
-package org.apache.hadoop.hdfs.protocol.proto;
-
-public final class ClientDatanodeProtocolProtos {
[... remaining ~4,150 deleted lines of protoc-generated code elided; the file
continued through the generated GetReplicaVisibleLengthRequestProto,
GetReplicaVisibleLengthResponseProto, RefreshNamenodesRequestProto,
RefreshNamenodesResponseProto, and DeleteBlockPoolRequestProto message
classes, and the excerpt is truncated mid-file ...]
= 2; - private boolean force_; - public boolean hasForce() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public boolean getForce() { - return force_; - } - - private void initFields() { - blockPool_ = ""; - force_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBlockPool()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasForce()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getBlockPoolBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBool(2, force_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getBlockPoolBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(2, force_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto) obj; - - boolean result = true; - result = result && (hasBlockPool() == other.hasBlockPool()); - if (hasBlockPool()) { - result = result && getBlockPool() - .equals(other.getBlockPool()); - } - result = result && (hasForce() == other.hasForce()); - if (hasForce()) { - result = result && (getForce() - == other.getForce()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBlockPool()) { - hash = (37 * hash) + BLOCKPOOL_FIELD_NUMBER; - hash = (53 * hash) + getBlockPool().hashCode(); - } - if (hasForce()) { - hash = (37 * hash) + FORCE_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getForce()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) 
- throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return 
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_DeleteBlockPoolRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_DeleteBlockPoolRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - blockPool_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - force_ = false; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.blockPool_ = blockPool_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.force_ = force_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder 
mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.getDefaultInstance()) return this; - if (other.hasBlockPool()) { - setBlockPool(other.getBlockPool()); - } - if (other.hasForce()) { - setForce(other.getForce()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBlockPool()) { - - return false; - } - if (!hasForce()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - blockPool_ = input.readBytes(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - force_ = input.readBool(); - break; - } - } - } - } - - private int bitField0_; - - // required string blockPool = 1; - private java.lang.Object blockPool_ = ""; - public boolean hasBlockPool() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getBlockPool() { - java.lang.Object ref = blockPool_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - blockPool_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setBlockPool(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - blockPool_ = value; - onChanged(); - return this; - } - public Builder clearBlockPool() { - bitField0_ = (bitField0_ & ~0x00000001); - blockPool_ = getDefaultInstance().getBlockPool(); - onChanged(); - return this; - } - void setBlockPool(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - blockPool_ = value; - onChanged(); - } - - // required bool force = 2; - private boolean force_ ; - public boolean hasForce() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public boolean getForce() { - return force_; - } - public Builder setForce(boolean value) { - bitField0_ |= 0x00000002; - force_ = value; - onChanged(); - return this; - } - public Builder clearForce() { - bitField0_ = (bitField0_ & ~0x00000002); - force_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:DeleteBlockPoolRequestProto) - } - - static { - defaultInstance = new DeleteBlockPoolRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:DeleteBlockPoolRequestProto) - } - - public interface DeleteBlockPoolResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class DeleteBlockPoolResponseProto extends - com.google.protobuf.GeneratedMessage - implements DeleteBlockPoolResponseProtoOrBuilder { - // Use DeleteBlockPoolResponseProto.newBuilder() to construct. 
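
A minimal usage sketch of the DeleteBlockPoolRequestProto API generated above, for readers skimming this deleted hunk. The block-pool id below is a hypothetical value, and toByteArray() comes from the protobuf MessageLite base class rather than from this file:

    DeleteBlockPoolRequestProto req = DeleteBlockPoolRequestProto.newBuilder()
        .setBlockPool("BP-1234567890-127.0.0.1-1")  // hypothetical block pool id
        .setForce(false)                            // blockPool and force are both required
        .build();                                   // build() throws if a required field is unset
    byte[] wire = req.toByteArray();
    DeleteBlockPoolRequestProto parsed = DeleteBlockPoolRequestProto.parseFrom(wire);
    assert parsed.getForce() == req.getForce();

Note that build() delegates to the isInitialized() check shown above, so omitting either required field raises an UninitializedMessageException instead of producing a half-built message.
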
- private DeleteBlockPoolResponseProto(Builder builder) { - super(builder); - } - private DeleteBlockPoolResponseProto(boolean noInit) {} - - private static final DeleteBlockPoolResponseProto defaultInstance; - public static DeleteBlockPoolResponseProto getDefaultInstance() { - return defaultInstance; - } - - public DeleteBlockPoolResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_DeleteBlockPoolResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_DeleteBlockPoolResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_DeleteBlockPoolResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_DeleteBlockPoolResponseProto_fieldAccessorTable; - } - - // Construct using 
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, 
unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:DeleteBlockPoolResponseProto) - } - - static { - defaultInstance = new DeleteBlockPoolResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:DeleteBlockPoolResponseProto) - } - - public interface GetBlockLocalPathInfoRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .ExtendedBlockProto block = 1; - boolean hasBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder(); - - // required .BlockTokenIdentifierProto token = 2; - boolean hasToken(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getToken(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getTokenOrBuilder(); - } - public static final class GetBlockLocalPathInfoRequestProto extends - com.google.protobuf.GeneratedMessage - implements GetBlockLocalPathInfoRequestProtoOrBuilder { - // Use GetBlockLocalPathInfoRequestProto.newBuilder() to construct. - private GetBlockLocalPathInfoRequestProto(Builder builder) { - super(builder); - } - private GetBlockLocalPathInfoRequestProto(boolean noInit) {} - - private static final GetBlockLocalPathInfoRequestProto defaultInstance; - public static GetBlockLocalPathInfoRequestProto getDefaultInstance() { - return defaultInstance; - } - - public GetBlockLocalPathInfoRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_GetBlockLocalPathInfoRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_GetBlockLocalPathInfoRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required .ExtendedBlockProto block = 1; - public static final int BLOCK_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() { - return block_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() { - return block_; - } - - // required .BlockTokenIdentifierProto token = 2; - public static final int TOKEN_FIELD_NUMBER = 2; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto token_; - public boolean hasToken() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getToken() { - return token_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getTokenOrBuilder() { - return token_; - } - - private void initFields() { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - } - private byte 
memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBlock()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasToken()) { - memoizedIsInitialized = 0; - return false; - } - if (!getBlock().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - if (!getToken().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, block_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, token_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, block_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, token_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto) obj; - - boolean result = true; - result = result && (hasBlock() == other.hasBlock()); - if (hasBlock()) { - result = result && getBlock() - .equals(other.getBlock()); - } - result = result && (hasToken() == other.hasToken()); - if (hasToken()) { - result = result && getToken() - .equals(other.getToken()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBlock()) { - hash = (37 * hash) + BLOCK_FIELD_NUMBER; - hash = (53 * hash) + getBlock().hashCode(); - } - if (hasToken()) { - hash = (37 * hash) + TOKEN_FIELD_NUMBER; - hash = (53 * hash) + getToken().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, 
extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return 
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_GetBlockLocalPathInfoRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_GetBlockLocalPathInfoRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBlockFieldBuilder(); - getTokenFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - if (tokenBuilder_ == null) { - token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - } else { - tokenBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (blockBuilder_ == null) { - result.block_ = block_; - } else { - result.block_ = blockBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - if (tokenBuilder_ 
== null) { - result.token_ = token_; - } else { - result.token_ = tokenBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.getDefaultInstance()) return this; - if (other.hasBlock()) { - mergeBlock(other.getBlock()); - } - if (other.hasToken()) { - mergeToken(other.getToken()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBlock()) { - - return false; - } - if (!hasToken()) { - - return false; - } - if (!getBlock().isInitialized()) { - - return false; - } - if (!getToken().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(); - if (hasBlock()) { - subBuilder.mergeFrom(getBlock()); - } - input.readMessage(subBuilder, extensionRegistry); - setBlock(subBuilder.buildPartial()); - break; - } - case 18: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.newBuilder(); - if (hasToken()) { - subBuilder.mergeFrom(getToken()); - } - input.readMessage(subBuilder, extensionRegistry); - setToken(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .ExtendedBlockProto block = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() { - if (blockBuilder_ == null) { - return block_; - } else { - return blockBuilder_.getMessage(); - } - } - public Builder 
setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { - if (blockBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - block_ = value; - onChanged(); - } else { - blockBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setBlock( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) { - if (blockBuilder_ == null) { - block_ = builderForValue.build(); - onChanged(); - } else { - blockBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { - if (blockBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) { - block_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); - } else { - block_ = value; - } - onChanged(); - } else { - blockBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearBlock() { - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - onChanged(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getBlockFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() { - if (blockBuilder_ != null) { - return blockBuilder_.getMessageOrBuilder(); - } else { - return block_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> - getBlockFieldBuilder() { - if (blockBuilder_ == null) { - blockBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>( - block_, - getParentForChildren(), - isClean()); - block_ = null; - } - return blockBuilder_; - } - - // required .BlockTokenIdentifierProto token = 2; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder> tokenBuilder_; - public boolean hasToken() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getToken() { - if (tokenBuilder_ == null) { - return token_; - } else { - return tokenBuilder_.getMessage(); - } - } - public Builder 
setToken(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto value) { - if (tokenBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - token_ = value; - onChanged(); - } else { - tokenBuilder_.setMessage(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder setToken( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder builderForValue) { - if (tokenBuilder_ == null) { - token_ = builderForValue.build(); - onChanged(); - } else { - tokenBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder mergeToken(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto value) { - if (tokenBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && - token_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance()) { - token_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.newBuilder(token_).mergeFrom(value).buildPartial(); - } else { - token_ = value; - } - onChanged(); - } else { - tokenBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder clearToken() { - if (tokenBuilder_ == null) { - token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - onChanged(); - } else { - tokenBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder getTokenBuilder() { - bitField0_ |= 0x00000002; - onChanged(); - return getTokenFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getTokenOrBuilder() { - if (tokenBuilder_ != null) { - return tokenBuilder_.getMessageOrBuilder(); - } else { - return token_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder> - getTokenFieldBuilder() { - if (tokenBuilder_ == null) { - tokenBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder>( - token_, - getParentForChildren(), - isClean()); - token_ = null; - } - return tokenBuilder_; - } - - // @@protoc_insertion_point(builder_scope:GetBlockLocalPathInfoRequestProto) - } - - static { - defaultInstance = new GetBlockLocalPathInfoRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetBlockLocalPathInfoRequestProto) - } - - public interface GetBlockLocalPathInfoResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .ExtendedBlockProto block = 1; - boolean hasBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder(); - - // required string localPath = 2; - boolean hasLocalPath(); - String getLocalPath(); - - // required string localMetaPath = 3; - boolean hasLocalMetaPath(); - String 
getLocalMetaPath(); - } - public static final class GetBlockLocalPathInfoResponseProto extends - com.google.protobuf.GeneratedMessage - implements GetBlockLocalPathInfoResponseProtoOrBuilder { - // Use GetBlockLocalPathInfoResponseProto.newBuilder() to construct. - private GetBlockLocalPathInfoResponseProto(Builder builder) { - super(builder); - } - private GetBlockLocalPathInfoResponseProto(boolean noInit) {} - - private static final GetBlockLocalPathInfoResponseProto defaultInstance; - public static GetBlockLocalPathInfoResponseProto getDefaultInstance() { - return defaultInstance; - } - - public GetBlockLocalPathInfoResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_GetBlockLocalPathInfoResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_GetBlockLocalPathInfoResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required .ExtendedBlockProto block = 1; - public static final int BLOCK_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() { - return block_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() { - return block_; - } - - // required string localPath = 2; - public static final int LOCALPATH_FIELD_NUMBER = 2; - private java.lang.Object localPath_; - public boolean hasLocalPath() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getLocalPath() { - java.lang.Object ref = localPath_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - localPath_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getLocalPathBytes() { - java.lang.Object ref = localPath_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - localPath_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string localMetaPath = 3; - public static final int LOCALMETAPATH_FIELD_NUMBER = 3; - private java.lang.Object localMetaPath_; - public boolean hasLocalMetaPath() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public String getLocalMetaPath() { - java.lang.Object ref = localMetaPath_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - localMetaPath_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getLocalMetaPathBytes() { - java.lang.Object ref = localMetaPath_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - localMetaPath_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void 
initFields() { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - localPath_ = ""; - localMetaPath_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBlock()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasLocalPath()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasLocalMetaPath()) { - memoizedIsInitialized = 0; - return false; - } - if (!getBlock().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, block_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getLocalPathBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, getLocalMetaPathBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, block_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getLocalPathBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getLocalMetaPathBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto) obj; - - boolean result = true; - result = result && (hasBlock() == other.hasBlock()); - if (hasBlock()) { - result = result && getBlock() - .equals(other.getBlock()); - } - result = result && (hasLocalPath() == other.hasLocalPath()); - if (hasLocalPath()) { - result = result && getLocalPath() - .equals(other.getLocalPath()); - } - result = result && (hasLocalMetaPath() == other.hasLocalMetaPath()); - if (hasLocalMetaPath()) { - result = result && getLocalMetaPath() - .equals(other.getLocalMetaPath()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBlock()) { - hash = (37 * hash) + BLOCK_FIELD_NUMBER; - hash = (53 * hash) + getBlock().hashCode(); - } - if (hasLocalPath()) { - hash = (37 * hash) + LOCALPATH_FIELD_NUMBER; - hash = (53 * hash) + getLocalPath().hashCode(); - } - if (hasLocalMetaPath()) { 
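// mix in the field number (x37), then the field value (x53): protoc's standard per-field hashCode pattern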
- hash = (37 * hash) + LOCALMETAPATH_FIELD_NUMBER; - hash = (53 * hash) + getLocalMetaPath().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder 
newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_GetBlockLocalPathInfoResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_GetBlockLocalPathInfoResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBlockFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - localPath_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - localMetaPath_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (blockBuilder_ == null) { - result.block_ = block_; - } else { - result.block_ = blockBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.localPath_ = localPath_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.localMetaPath_ = localMetaPath_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance()) return this; - if (other.hasBlock()) { - mergeBlock(other.getBlock()); - } - if (other.hasLocalPath()) { - setLocalPath(other.getLocalPath()); - } - if (other.hasLocalMetaPath()) { - setLocalMetaPath(other.getLocalMetaPath()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBlock()) { - - return false; - } - if (!hasLocalPath()) { - - return false; - } - if (!hasLocalMetaPath()) { - - return false; - } - if (!getBlock().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(); - if (hasBlock()) { - subBuilder.mergeFrom(getBlock()); - } - input.readMessage(subBuilder, extensionRegistry); - setBlock(subBuilder.buildPartial()); - break; - } - case 18: { - bitField0_ |= 0x00000002; - localPath_ = input.readBytes(); - break; - } - case 26: { - bitField0_ |= 0x00000004; - localMetaPath_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required .ExtendedBlockProto block = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() { - if (blockBuilder_ == null) { - return block_; - } else { - return blockBuilder_.getMessage(); - } - } - public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { - if (blockBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - block_ = value; - onChanged(); - } else { - blockBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setBlock( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) { - if (blockBuilder_ == null) { - block_ = builderForValue.build(); - onChanged(); - } else { - blockBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { - if (blockBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) { - block_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); - } else { - block_ = value; - } - onChanged(); - } else { - blockBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearBlock() { - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - onChanged(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getBlockFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() { - if (blockBuilder_ != null) { - return blockBuilder_.getMessageOrBuilder(); - } else { - return block_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> - getBlockFieldBuilder() { - if (blockBuilder_ == null) { - blockBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>( - block_, - getParentForChildren(), - isClean()); - block_ = null; - } - return blockBuilder_; - } - - // required string localPath = 2; - private java.lang.Object localPath_ = ""; - public boolean hasLocalPath() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getLocalPath() { - java.lang.Object ref = localPath_; - if (!(ref 
instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - localPath_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setLocalPath(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - localPath_ = value; - onChanged(); - return this; - } - public Builder clearLocalPath() { - bitField0_ = (bitField0_ & ~0x00000002); - localPath_ = getDefaultInstance().getLocalPath(); - onChanged(); - return this; - } - void setLocalPath(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000002; - localPath_ = value; - onChanged(); - } - - // required string localMetaPath = 3; - private java.lang.Object localMetaPath_ = ""; - public boolean hasLocalMetaPath() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public String getLocalMetaPath() { - java.lang.Object ref = localMetaPath_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - localMetaPath_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setLocalMetaPath(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - localMetaPath_ = value; - onChanged(); - return this; - } - public Builder clearLocalMetaPath() { - bitField0_ = (bitField0_ & ~0x00000004); - localMetaPath_ = getDefaultInstance().getLocalMetaPath(); - onChanged(); - return this; - } - void setLocalMetaPath(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000004; - localMetaPath_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:GetBlockLocalPathInfoResponseProto) - } - - static { - defaultInstance = new GetBlockLocalPathInfoResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetBlockLocalPathInfoResponseProto) - } - - public static abstract class ClientDatanodeProtocolService - implements com.google.protobuf.Service { - protected ClientDatanodeProtocolService() {} - - public interface Interface { - public abstract void getReplicaVisibleLength( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request, - com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto> done); - - public abstract void refreshNamenode( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request, - com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto> done); - - public abstract void deleteBlockPool( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request, - com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto> done); - - public abstract void getBlockLocalPathInfo( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request, - com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto> done); - - } - - public static com.google.protobuf.Service newReflectiveService( - final Interface impl) { - return new ClientDatanodeProtocolService() { - @java.lang.Override - public void getReplicaVisibleLength( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request, - com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto> done) { - impl.getReplicaVisibleLength(controller, request, done); - } - - @java.lang.Override -
public void refreshNamenode( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request, - com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto> done) { - impl.refreshNamenode(controller, request, done); - } - - @java.lang.Override - public void deleteBlockPool( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request, - com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto> done) { - impl.deleteBlockPool(controller, request, done); - } - - @java.lang.Override - public void getBlockLocalPathInfo( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request, - com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto> done) { - impl.getBlockLocalPathInfo(controller, request, done); - } - - }; - } - - public static com.google.protobuf.BlockingService - newReflectiveBlockingService(final BlockingInterface impl) { - return new com.google.protobuf.BlockingService() { - public final com.google.protobuf.Descriptors.ServiceDescriptor - getDescriptorForType() { - return getDescriptor(); - } - - public final com.google.protobuf.Message callBlockingMethod( - com.google.protobuf.Descriptors.MethodDescriptor method, - com.google.protobuf.RpcController controller, - com.google.protobuf.Message request) - throws com.google.protobuf.ServiceException { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.callBlockingMethod() given method descriptor for " + - "wrong service type."); - } - switch(method.getIndex()) { - case 0: - return impl.getReplicaVisibleLength(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto)request); - case 1: - return impl.refreshNamenode(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto)request); - case 2: - return impl.deleteBlockPool(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto)request); - case 3: - return impl.getBlockLocalPathInfo(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto)request); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public final com.google.protobuf.Message - getRequestPrototype( - com.google.protobuf.Descriptors.MethodDescriptor method) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.getRequestPrototype() given method " + - "descriptor for wrong service type."); - } - switch(method.getIndex()) { - case 0: - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.getDefaultInstance(); - case 1: - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.getDefaultInstance(); - case 2: - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.getDefaultInstance(); - case 3: - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.getDefaultInstance(); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public final com.google.protobuf.Message - getResponsePrototype( - com.google.protobuf.Descriptors.MethodDescriptor method) { - if
(method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.getResponsePrototype() given method " + - "descriptor for wrong service type."); - } - switch(method.getIndex()) { - case 0: - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance(); - case 1: - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance(); - case 2: - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance(); - case 3: - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance(); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - }; - } - - public abstract void getReplicaVisibleLength( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request, - com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto> done); - - public abstract void refreshNamenode( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request, - com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto> done); - - public abstract void deleteBlockPool( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request, - com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto> done); - - public abstract void getBlockLocalPathInfo( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request, - com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto> done); - - public static final - com.google.protobuf.Descriptors.ServiceDescriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.getDescriptor().getServices().get(0); - } - public final com.google.protobuf.Descriptors.ServiceDescriptor - getDescriptorForType() { - return getDescriptor(); - } - - public final void callMethod( - com.google.protobuf.Descriptors.MethodDescriptor method, - com.google.protobuf.RpcController controller, - com.google.protobuf.Message request, - com.google.protobuf.RpcCallback< - com.google.protobuf.Message> done) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.callMethod() given method descriptor for wrong " + - "service type."); - } - switch(method.getIndex()) { - case 0: - this.getReplicaVisibleLength(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto)request, - com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto>specializeCallback( - done)); - return; - case 1: - this.refreshNamenode(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto)request, - com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto>specializeCallback( - done)); - return; - case 2: - this.deleteBlockPool(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto)request, - com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto>specializeCallback( - done)); - return; - case 3: - this.getBlockLocalPathInfo(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto)request, - com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto>specializeCallback( - done)); - return;
- default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public final com.google.protobuf.Message - getRequestPrototype( - com.google.protobuf.Descriptors.MethodDescriptor method) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.getRequestPrototype() given method " + - "descriptor for wrong service type."); - } - switch(method.getIndex()) { - case 0: - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.getDefaultInstance(); - case 1: - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.getDefaultInstance(); - case 2: - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.getDefaultInstance(); - case 3: - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.getDefaultInstance(); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public final com.google.protobuf.Message - getResponsePrototype( - com.google.protobuf.Descriptors.MethodDescriptor method) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.getResponsePrototype() given method " + - "descriptor for wrong service type."); - } - switch(method.getIndex()) { - case 0: - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance(); - case 1: - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance(); - case 2: - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance(); - case 3: - return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance(); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public static Stub newStub( - com.google.protobuf.RpcChannel channel) { - return new Stub(channel); - } - - public static final class Stub extends org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService implements Interface { - private Stub(com.google.protobuf.RpcChannel channel) { - this.channel = channel; - } - - private final com.google.protobuf.RpcChannel channel; - - public com.google.protobuf.RpcChannel getChannel() { - return channel; - } - - public void getReplicaVisibleLength( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request, - com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto> done) { - channel.callMethod( - getDescriptor().getMethods().get(0), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance())); - } - - public void refreshNamenode( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request, - com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto> done) { -
channel.callMethod( - getDescriptor().getMethods().get(1), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance())); - } - - public void deleteBlockPool( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request, - com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto> done) { - channel.callMethod( - getDescriptor().getMethods().get(2), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance())); - } - - public void getBlockLocalPathInfo( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request, - com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto> done) { - channel.callMethod( - getDescriptor().getMethods().get(3), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance())); - } - } - - public static BlockingInterface newBlockingStub( - com.google.protobuf.BlockingRpcChannel channel) { - return new BlockingStub(channel); - } - - public interface BlockingInterface { - public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto getReplicaVisibleLength( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto refreshNamenode( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto deleteBlockPool( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto getBlockLocalPathInfo( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request) - throws com.google.protobuf.ServiceException; - } - - private static final class BlockingStub
implements BlockingInterface { - private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { - this.channel = channel; - } - - private final com.google.protobuf.BlockingRpcChannel channel; - - public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto getReplicaVisibleLength( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(0), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto refreshNamenode( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(1), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto deleteBlockPool( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(2), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto getBlockLocalPathInfo( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(3), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance()); - } - - } - } - - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetReplicaVisibleLengthRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetReplicaVisibleLengthRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetReplicaVisibleLengthResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetReplicaVisibleLengthResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - 
internal_static_RefreshNamenodesRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RefreshNamenodesRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RefreshNamenodesResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RefreshNamenodesResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_DeleteBlockPoolRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_DeleteBlockPoolRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_DeleteBlockPoolResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_DeleteBlockPoolResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetBlockLocalPathInfoRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetBlockLocalPathInfoRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetBlockLocalPathInfoResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetBlockLocalPathInfoResponseProto_fieldAccessorTable; - - public static com.google.protobuf.Descriptors.FileDescriptor - getDescriptor() { - return descriptor; - } - private static com.google.protobuf.Descriptors.FileDescriptor - descriptor; - static { - java.lang.String[] descriptorData = { - "\n\034ClientDatanodeProtocol.proto\032\nhdfs.pro" + - "to\"I\n#GetReplicaVisibleLengthRequestProt" + - "o\022\"\n\005block\030\001 \002(\0132\023.ExtendedBlockProto\"6\n" + - "$GetReplicaVisibleLengthResponseProto\022\016\n" + - "\006length\030\001 \002(\004\"\036\n\034RefreshNamenodesRequest" + - "Proto\"\037\n\035RefreshNamenodesResponseProto\"?" 
+ - "\n\033DeleteBlockPoolRequestProto\022\021\n\tblockPo" + - "ol\030\001 \002(\t\022\r\n\005force\030\002 \002(\010\"\036\n\034DeleteBlockPo" + - "olResponseProto\"r\n!GetBlockLocalPathInfo" + - "RequestProto\022\"\n\005block\030\001 \002(\0132\023.ExtendedBl", - "ockProto\022)\n\005token\030\002 \002(\0132\032.BlockTokenIden" + - "tifierProto\"r\n\"GetBlockLocalPathInfoResp" + - "onseProto\022\"\n\005block\030\001 \002(\0132\023.ExtendedBlock" + - "Proto\022\021\n\tlocalPath\030\002 \002(\t\022\025\n\rlocalMetaPat" + - "h\030\003 \002(\t2\213\003\n\035ClientDatanodeProtocolServic" + - "e\022f\n\027getReplicaVisibleLength\022$.GetReplic" + - "aVisibleLengthRequestProto\032%.GetReplicaV" + - "isibleLengthResponseProto\022P\n\017refreshName" + - "node\022\035.RefreshNamenodesRequestProto\032\036.Re" + - "freshNamenodesResponseProto\022N\n\017deleteBlo", - "ckPool\022\034.DeleteBlockPoolRequestProto\032\035.D" + - "eleteBlockPoolResponseProto\022`\n\025getBlockL" + - "ocalPathInfo\022\".GetBlockLocalPathInfoRequ" + - "estProto\032#.GetBlockLocalPathInfoResponse" + - "ProtoBK\n%org.apache.hadoop.hdfs.protocol" + - ".protoB\034ClientDatanodeProtocolProtos\210\001\001\240" + - "\001\001" - }; - com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = - new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { - public com.google.protobuf.ExtensionRegistry assignDescriptors( - com.google.protobuf.Descriptors.FileDescriptor root) { - descriptor = root; - internal_static_GetReplicaVisibleLengthRequestProto_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_GetReplicaVisibleLengthRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetReplicaVisibleLengthRequestProto_descriptor, - new java.lang.String[] { "Block", }, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.Builder.class); - internal_static_GetReplicaVisibleLengthResponseProto_descriptor = - getDescriptor().getMessageTypes().get(1); - internal_static_GetReplicaVisibleLengthResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetReplicaVisibleLengthResponseProto_descriptor, - new java.lang.String[] { "Length", }, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.Builder.class); - internal_static_RefreshNamenodesRequestProto_descriptor = - getDescriptor().getMessageTypes().get(2); - internal_static_RefreshNamenodesRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RefreshNamenodesRequestProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.Builder.class); - internal_static_RefreshNamenodesResponseProto_descriptor = - getDescriptor().getMessageTypes().get(3); - internal_static_RefreshNamenodesResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RefreshNamenodesResponseProto_descriptor, - new 
java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.Builder.class); - internal_static_DeleteBlockPoolRequestProto_descriptor = - getDescriptor().getMessageTypes().get(4); - internal_static_DeleteBlockPoolRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_DeleteBlockPoolRequestProto_descriptor, - new java.lang.String[] { "BlockPool", "Force", }, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.Builder.class); - internal_static_DeleteBlockPoolResponseProto_descriptor = - getDescriptor().getMessageTypes().get(5); - internal_static_DeleteBlockPoolResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_DeleteBlockPoolResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.Builder.class); - internal_static_GetBlockLocalPathInfoRequestProto_descriptor = - getDescriptor().getMessageTypes().get(6); - internal_static_GetBlockLocalPathInfoRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetBlockLocalPathInfoRequestProto_descriptor, - new java.lang.String[] { "Block", "Token", }, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.Builder.class); - internal_static_GetBlockLocalPathInfoResponseProto_descriptor = - getDescriptor().getMessageTypes().get(7); - internal_static_GetBlockLocalPathInfoResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetBlockLocalPathInfoResponseProto_descriptor, - new java.lang.String[] { "Block", "LocalPath", "LocalMetaPath", }, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.Builder.class); - return null; - } - }; - com.google.protobuf.Descriptors.FileDescriptor - .internalBuildGeneratedFileFrom(descriptorData, - new com.google.protobuf.Descriptors.FileDescriptor[] { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(), - }, assigner); - } - - // @@protoc_insertion_point(outer_class_scope) -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/ClientNamenodeProtocolProtos.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/ClientNamenodeProtocolProtos.java deleted file mode 100644 index c6b7b2abba3..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/ClientNamenodeProtocolProtos.java +++ /dev/null @@ -1,46529 +0,0 @@ -// Generated by the protocol buffer compiler. DO NOT EDIT! 
-// source: ClientNamenodeProtocol.proto - - package org.apache.hadoop.hdfs.protocol.proto; - - public final class ClientNamenodeProtocolProtos { - private ClientNamenodeProtocolProtos() {} - public static void registerAllExtensions( - com.google.protobuf.ExtensionRegistry registry) { - } - public enum CreateFlag - implements com.google.protobuf.ProtocolMessageEnum { - CREATE(0, 1), - OVERWRITE(1, 2), - APPEND(2, 4), - ; - - public static final int CREATE_VALUE = 1; - public static final int OVERWRITE_VALUE = 2; - public static final int APPEND_VALUE = 4; - - - public final int getNumber() { return value; } - - public static CreateFlag valueOf(int value) { - switch (value) { - case 1: return CREATE; - case 2: return OVERWRITE; - case 4: return APPEND; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap<CreateFlag> - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap<CreateFlag> - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap<CreateFlag>() { - public CreateFlag findValueByNumber(int number) { - return CreateFlag.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.getDescriptor().getEnumTypes().get(0); - } - - private static final CreateFlag[] VALUES = { - CREATE, OVERWRITE, APPEND, - }; - - public static CreateFlag valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private CreateFlag(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:CreateFlag) - } - - public enum DatanodeReportType - implements com.google.protobuf.ProtocolMessageEnum { - ALL(0, 1), - LIVE(1, 3), - ; - - public static final DatanodeReportType DEAD = LIVE; - public static final int ALL_VALUE = 1; - public static final int LIVE_VALUE = 3; - public static final int DEAD_VALUE = 3; - - - public final int getNumber() { return value; } - - public static DatanodeReportType valueOf(int value) { - switch (value) { - case 1: return ALL; - case 3: return LIVE; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap<DatanodeReportType> - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap<DatanodeReportType> - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap<DatanodeReportType>() { - public DatanodeReportType findValueByNumber(int number) { - return DatanodeReportType.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.getDescriptor().getEnumTypes().get(1); - } - - private static final
DatanodeReportType[] VALUES = { - ALL, LIVE, DEAD, - }; - - public static DatanodeReportType valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private DatanodeReportType(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:DatanodeReportType) - } - - public enum SafeModeAction - implements com.google.protobuf.ProtocolMessageEnum { - SAFEMODE_LEAVE(0, 1), - SAFEMODE_ENTER(1, 2), - SAFEMODE_GET(2, 3), - ; - - public static final int SAFEMODE_LEAVE_VALUE = 1; - public static final int SAFEMODE_ENTER_VALUE = 2; - public static final int SAFEMODE_GET_VALUE = 3; - - - public final int getNumber() { return value; } - - public static SafeModeAction valueOf(int value) { - switch (value) { - case 1: return SAFEMODE_LEAVE; - case 2: return SAFEMODE_ENTER; - case 3: return SAFEMODE_GET; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap<SafeModeAction> - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap<SafeModeAction> - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap<SafeModeAction>() { - public SafeModeAction findValueByNumber(int number) { - return SafeModeAction.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.getDescriptor().getEnumTypes().get(2); - } - - private static final SafeModeAction[] VALUES = { - SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET, - }; - - public static SafeModeAction valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private SafeModeAction(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:SafeModeAction) - } - - public enum UpgradeAction - implements com.google.protobuf.ProtocolMessageEnum { - GET_STATUS(0, 1), - DETAILED_STATUS(1, 2), - FORCE_PROCEED(2, 3), - ; - - public static final int GET_STATUS_VALUE = 1; - public static final int DETAILED_STATUS_VALUE = 2; - public static final int FORCE_PROCEED_VALUE = 3; - - - public final int getNumber() { return value; } - - public static UpgradeAction valueOf(int value) { - switch (value) { - case 1: return GET_STATUS; - case 2: return DETAILED_STATUS; - case 3: return FORCE_PROCEED; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap<UpgradeAction> - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap<UpgradeAction> - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap<UpgradeAction>() { - public UpgradeAction findValueByNumber(int number) { - return UpgradeAction.valueOf(number); - } - }; - - public final
com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.getDescriptor().getEnumTypes().get(3); - } - - private static final UpgradeAction[] VALUES = { - GET_STATUS, DETAILED_STATUS, FORCE_PROCEED, - }; - - public static UpgradeAction valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private UpgradeAction(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:UpgradeAction) - } - - public interface GetBlockLocationsRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string src = 1; - boolean hasSrc(); - String getSrc(); - - // required uint64 offset = 2; - boolean hasOffset(); - long getOffset(); - - // required uint64 length = 3; - boolean hasLength(); - long getLength(); - } - public static final class GetBlockLocationsRequestProto extends - com.google.protobuf.GeneratedMessage - implements GetBlockLocationsRequestProtoOrBuilder { - // Use GetBlockLocationsRequestProto.newBuilder() to construct. - private GetBlockLocationsRequestProto(Builder builder) { - super(builder); - } - private GetBlockLocationsRequestProto(boolean noInit) {} - - private static final GetBlockLocationsRequestProto defaultInstance; - public static GetBlockLocationsRequestProto getDefaultInstance() { - return defaultInstance; - } - - public GetBlockLocationsRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetBlockLocationsRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetBlockLocationsRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string src = 1; - public static final int SRC_FIELD_NUMBER = 1; - private java.lang.Object src_; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - src_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getSrcBytes() { - java.lang.Object ref = src_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - src_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required uint64 offset = 2; - public static final int OFFSET_FIELD_NUMBER = 2; - private long offset_; - public boolean hasOffset() { - return ((bitField0_ & 0x00000002) == 
0x00000002); - } - public long getOffset() { - return offset_; - } - - // required uint64 length = 3; - public static final int LENGTH_FIELD_NUMBER = 3; - private long length_; - public boolean hasLength() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getLength() { - return length_; - } - - private void initFields() { - src_ = ""; - offset_ = 0L; - length_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasSrc()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasOffset()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasLength()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, offset_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt64(3, length_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, offset_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(3, length_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto) obj; - - boolean result = true; - result = result && (hasSrc() == other.hasSrc()); - if (hasSrc()) { - result = result && getSrc() - .equals(other.getSrc()); - } - result = result && (hasOffset() == other.hasOffset()); - if (hasOffset()) { - result = result && (getOffset() - == other.getOffset()); - } - result = result && (hasLength() == other.hasLength()); - if (hasLength()) { - result = result && (getLength() - == other.getLength()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasSrc()) { - hash = (37 * hash) + SRC_FIELD_NUMBER; - hash = (53 * hash) + getSrc().hashCode(); - } - if (hasOffset()) { - hash = (37 * hash) + OFFSET_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getOffset()); - } - if (hasLength()) { - hash = (37 
* hash) + LENGTH_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getLength()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder 
newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetBlockLocationsRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetBlockLocationsRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - src_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - offset_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - length_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto(this); - int 
from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.src_ = src_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.offset_ = offset_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.length_ = length_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto.getDefaultInstance()) return this; - if (other.hasSrc()) { - setSrc(other.getSrc()); - } - if (other.hasOffset()) { - setOffset(other.getOffset()); - } - if (other.hasLength()) { - setLength(other.getLength()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasSrc()) { - - return false; - } - if (!hasOffset()) { - - return false; - } - if (!hasLength()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - src_ = input.readBytes(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - offset_ = input.readUInt64(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - length_ = input.readUInt64(); - break; - } - } - } - } - - private int bitField0_; - - // required string src = 1; - private java.lang.Object src_ = ""; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - src_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setSrc(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - return this; - } - public Builder clearSrc() { - bitField0_ = (bitField0_ & ~0x00000001); - src_ = getDefaultInstance().getSrc(); - onChanged(); - return this; - } - void setSrc(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - } - - // required uint64 offset = 2; - private long offset_ ; - public boolean hasOffset() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getOffset() { - return offset_; - } - public Builder 
setOffset(long value) { - bitField0_ |= 0x00000002; - offset_ = value; - onChanged(); - return this; - } - public Builder clearOffset() { - bitField0_ = (bitField0_ & ~0x00000002); - offset_ = 0L; - onChanged(); - return this; - } - - // required uint64 length = 3; - private long length_ ; - public boolean hasLength() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getLength() { - return length_; - } - public Builder setLength(long value) { - bitField0_ |= 0x00000004; - length_ = value; - onChanged(); - return this; - } - public Builder clearLength() { - bitField0_ = (bitField0_ & ~0x00000004); - length_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:GetBlockLocationsRequestProto) - } - - static { - defaultInstance = new GetBlockLocationsRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetBlockLocationsRequestProto) - } - - public interface GetBlockLocationsResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .LocatedBlocksProto locations = 1; - boolean hasLocations(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder(); - } - public static final class GetBlockLocationsResponseProto extends - com.google.protobuf.GeneratedMessage - implements GetBlockLocationsResponseProtoOrBuilder { - // Use GetBlockLocationsResponseProto.newBuilder() to construct. - private GetBlockLocationsResponseProto(Builder builder) { - super(builder); - } - private GetBlockLocationsResponseProto(boolean noInit) {} - - private static final GetBlockLocationsResponseProto defaultInstance; - public static GetBlockLocationsResponseProto getDefaultInstance() { - return defaultInstance; - } - - public GetBlockLocationsResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetBlockLocationsResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetBlockLocationsResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required .LocatedBlocksProto locations = 1; - public static final int LOCATIONS_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto locations_; - public boolean hasLocations() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations() { - return locations_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder() { - return locations_; - } - - private void initFields() { - locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasLocations()) { - memoizedIsInitialized = 0; - return false; - } - if (!getLocations().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - 
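
// Example (illustrative sketch): building and round-tripping the
// GetBlockLocationsRequestProto generated above. All three fields are
// 'required', so build() enforces that src, offset and length are set;
// the path value below is hypothetical.
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto;

public class GetBlockLocationsRequestExample {
  public static void main(String[] args) throws Exception {
    GetBlockLocationsRequestProto request = GetBlockLocationsRequestProto.newBuilder()
        .setSrc("/user/example/data.txt")  // hypothetical HDFS path
        .setOffset(0L)
        .setLength(1024L)
        .build();                          // throws if a required field is missing

    // Serialize to bytes and parse back; the value-based equals()/hashCode()
    // generated above hold for the round-tripped message.
    byte[] wire = request.toByteArray();
    GetBlockLocationsRequestProto parsed = GetBlockLocationsRequestProto.parseFrom(wire);
    System.out.println(request.equals(parsed));                   // true
    System.out.println(request.hashCode() == parsed.hashCode());  // true
  }
}
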
} - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, locations_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, locations_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto) obj; - - boolean result = true; - result = result && (hasLocations() == other.hasLocations()); - if (hasLocations()) { - result = result && getLocations() - .equals(other.getLocations()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasLocations()) { - hash = (37 * hash) + LOCATIONS_FIELD_NUMBER; - hash = (53 * hash) + getLocations().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetBlockLocationsResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetBlockLocationsResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getLocationsFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (locationsBuilder_ == null) { - locations_ = 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance(); - } else { - locationsBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (locationsBuilder_ == null) { - result.locations_ = locations_; - } else { - result.locations_ = locationsBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.getDefaultInstance()) return this; - if (other.hasLocations()) { - mergeLocations(other.getLocations()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasLocations()) { - - return false; - } - if (!getLocations().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - 
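
// Example (minimal sketch): the required 'locations' field above means an
// empty GetBlockLocationsResponseProto is not initialized. buildPartial()
// permits constructing one anyway, where build() would throw an
// UninitializedMessageException.
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto;

public class GetBlockLocationsResponseExample {
  public static void main(String[] args) {
    GetBlockLocationsResponseProto partial =
        GetBlockLocationsResponseProto.newBuilder().buildPartial();
    // false: required 'locations' is unset (and must itself be initialized).
    System.out.println(partial.isInitialized());
  }
}
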
while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.newBuilder(); - if (hasLocations()) { - subBuilder.mergeFrom(getLocations()); - } - input.readMessage(subBuilder, extensionRegistry); - setLocations(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .LocatedBlocksProto locations = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder> locationsBuilder_; - public boolean hasLocations() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations() { - if (locationsBuilder_ == null) { - return locations_; - } else { - return locationsBuilder_.getMessage(); - } - } - public Builder setLocations(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto value) { - if (locationsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - locations_ = value; - onChanged(); - } else { - locationsBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setLocations( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder builderForValue) { - if (locationsBuilder_ == null) { - locations_ = builderForValue.build(); - onChanged(); - } else { - locationsBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeLocations(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto value) { - if (locationsBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - locations_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance()) { - locations_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.newBuilder(locations_).mergeFrom(value).buildPartial(); - } else { - locations_ = value; - } - onChanged(); - } else { - locationsBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearLocations() { - if (locationsBuilder_ == null) { - locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance(); - onChanged(); - } else { - locationsBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder getLocationsBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getLocationsFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder() { - if (locationsBuilder_ != null) { - return locationsBuilder_.getMessageOrBuilder(); - } else { - 
return locations_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder> - getLocationsFieldBuilder() { - if (locationsBuilder_ == null) { - locationsBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder>( - locations_, - getParentForChildren(), - isClean()); - locations_ = null; - } - return locationsBuilder_; - } - - // @@protoc_insertion_point(builder_scope:GetBlockLocationsResponseProto) - } - - static { - defaultInstance = new GetBlockLocationsResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetBlockLocationsResponseProto) - } - - public interface GetServerDefaultsRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class GetServerDefaultsRequestProto extends - com.google.protobuf.GeneratedMessage - implements GetServerDefaultsRequestProtoOrBuilder { - // Use GetServerDefaultsRequestProto.newBuilder() to construct. - private GetServerDefaultsRequestProto(Builder builder) { - super(builder); - } - private GetServerDefaultsRequestProto(boolean noInit) {} - - private static final GetServerDefaultsRequestProto defaultInstance; - public static GetServerDefaultsRequestProto getDefaultInstance() { - return defaultInstance; - } - - public GetServerDefaultsRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetServerDefaultsRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetServerDefaultsRequestProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto other = 
(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, 
extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetServerDefaultsRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetServerDefaultsRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto(this); 
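
// Example (minimal sketch): GetServerDefaultsRequestProto declares no fields,
// so any freshly built instance is initialized, serializes to zero bytes, and
// compares equal to the shared default instance.
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto;

public class GetServerDefaultsRequestExample {
  public static void main(String[] args) {
    GetServerDefaultsRequestProto request = GetServerDefaultsRequestProto.newBuilder().build();
    System.out.println(request.getSerializedSize());  // 0
    System.out.println(request.equals(GetServerDefaultsRequestProto.getDefaultInstance()));  // true
  }
}
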
- onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:GetServerDefaultsRequestProto) - } - - static { - defaultInstance = new GetServerDefaultsRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetServerDefaultsRequestProto) - } - - public interface GetServerDefaultsResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .FsServerDefaultsProto serverDefaults = 1; - boolean hasServerDefaults(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto getServerDefaults(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProtoOrBuilder getServerDefaultsOrBuilder(); - } - public static final class GetServerDefaultsResponseProto extends - com.google.protobuf.GeneratedMessage - implements GetServerDefaultsResponseProtoOrBuilder { - // Use GetServerDefaultsResponseProto.newBuilder() to construct. 
- private GetServerDefaultsResponseProto(Builder builder) { - super(builder); - } - private GetServerDefaultsResponseProto(boolean noInit) {} - - private static final GetServerDefaultsResponseProto defaultInstance; - public static GetServerDefaultsResponseProto getDefaultInstance() { - return defaultInstance; - } - - public GetServerDefaultsResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetServerDefaultsResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetServerDefaultsResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required .FsServerDefaultsProto serverDefaults = 1; - public static final int SERVERDEFAULTS_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto serverDefaults_; - public boolean hasServerDefaults() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto getServerDefaults() { - return serverDefaults_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProtoOrBuilder getServerDefaultsOrBuilder() { - return serverDefaults_; - } - - private void initFields() { - serverDefaults_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasServerDefaults()) { - memoizedIsInitialized = 0; - return false; - } - if (!getServerDefaults().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, serverDefaults_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, serverDefaults_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto) obj; - - boolean result = true; - result = result && (hasServerDefaults() == other.hasServerDefaults()); - if 
(hasServerDefaults()) { - result = result && getServerDefaults() - .equals(other.getServerDefaults()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasServerDefaults()) { - hash = (37 * hash) + SERVERDEFAULTS_FIELD_NUMBER; - hash = (53 * hash) + getServerDefaults().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetServerDefaultsResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetServerDefaultsResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getServerDefaultsFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (serverDefaultsBuilder_ == null) { - serverDefaults_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance(); - } else { - serverDefaultsBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw 
newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (serverDefaultsBuilder_ == null) { - result.serverDefaults_ = serverDefaults_; - } else { - result.serverDefaults_ = serverDefaultsBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.getDefaultInstance()) return this; - if (other.hasServerDefaults()) { - mergeServerDefaults(other.getServerDefaults()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasServerDefaults()) { - - return false; - } - if (!getServerDefaults().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.newBuilder(); - if (hasServerDefaults()) { - subBuilder.mergeFrom(getServerDefaults()); - } - input.readMessage(subBuilder, extensionRegistry); - setServerDefaults(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .FsServerDefaultsProto serverDefaults = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto serverDefaults_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProtoOrBuilder> serverDefaultsBuilder_; - public boolean hasServerDefaults() { - return ((bitField0_ & 0x00000001) == 0x00000001); - 
} - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto getServerDefaults() { - if (serverDefaultsBuilder_ == null) { - return serverDefaults_; - } else { - return serverDefaultsBuilder_.getMessage(); - } - } - public Builder setServerDefaults(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto value) { - if (serverDefaultsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - serverDefaults_ = value; - onChanged(); - } else { - serverDefaultsBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setServerDefaults( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder builderForValue) { - if (serverDefaultsBuilder_ == null) { - serverDefaults_ = builderForValue.build(); - onChanged(); - } else { - serverDefaultsBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeServerDefaults(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto value) { - if (serverDefaultsBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - serverDefaults_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance()) { - serverDefaults_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.newBuilder(serverDefaults_).mergeFrom(value).buildPartial(); - } else { - serverDefaults_ = value; - } - onChanged(); - } else { - serverDefaultsBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearServerDefaults() { - if (serverDefaultsBuilder_ == null) { - serverDefaults_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance(); - onChanged(); - } else { - serverDefaultsBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder getServerDefaultsBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getServerDefaultsFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProtoOrBuilder getServerDefaultsOrBuilder() { - if (serverDefaultsBuilder_ != null) { - return serverDefaultsBuilder_.getMessageOrBuilder(); - } else { - return serverDefaults_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProtoOrBuilder> - getServerDefaultsFieldBuilder() { - if (serverDefaultsBuilder_ == null) { - serverDefaultsBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProtoOrBuilder>( - serverDefaults_, - getParentForChildren(), - isClean()); - serverDefaults_ = null; - } - return serverDefaultsBuilder_; - } - - // @@protoc_insertion_point(builder_scope:GetServerDefaultsResponseProto) - } - - static { - defaultInstance = new GetServerDefaultsResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetServerDefaultsResponseProto) - } - - public interface CreateRequestProtoOrBuilder - extends 
com.google.protobuf.MessageOrBuilder { - - // required string src = 1; - boolean hasSrc(); - String getSrc(); - - // required .FsPermissionProto masked = 2; - boolean hasMasked(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getMasked(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getMaskedOrBuilder(); - - // required string clientName = 3; - boolean hasClientName(); - String getClientName(); - - // required uint32 createFlag = 4; - boolean hasCreateFlag(); - int getCreateFlag(); - - // required bool createParent = 5; - boolean hasCreateParent(); - boolean getCreateParent(); - - // required uint32 replication = 6; - boolean hasReplication(); - int getReplication(); - - // required uint64 blockSize = 7; - boolean hasBlockSize(); - long getBlockSize(); - } - public static final class CreateRequestProto extends - com.google.protobuf.GeneratedMessage - implements CreateRequestProtoOrBuilder { - // Use CreateRequestProto.newBuilder() to construct. - private CreateRequestProto(Builder builder) { - super(builder); - } - private CreateRequestProto(boolean noInit) {} - - private static final CreateRequestProto defaultInstance; - public static CreateRequestProto getDefaultInstance() { - return defaultInstance; - } - - public CreateRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_CreateRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_CreateRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string src = 1; - public static final int SRC_FIELD_NUMBER = 1; - private java.lang.Object src_; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - src_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getSrcBytes() { - java.lang.Object ref = src_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - src_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required .FsPermissionProto masked = 2; - public static final int MASKED_FIELD_NUMBER = 2; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto masked_; - public boolean hasMasked() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getMasked() { - return masked_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getMaskedOrBuilder() { - return masked_; - } - - // required string clientName = 3; - public static final int CLIENTNAME_FIELD_NUMBER = 3; - private java.lang.Object clientName_; - public boolean hasClientName() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public String getClientName() { - java.lang.Object ref = clientName_; - if (ref instanceof String) { - return (String) ref; - } else { 
- com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - clientName_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getClientNameBytes() { - java.lang.Object ref = clientName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - clientName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required uint32 createFlag = 4; - public static final int CREATEFLAG_FIELD_NUMBER = 4; - private int createFlag_; - public boolean hasCreateFlag() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public int getCreateFlag() { - return createFlag_; - } - - // required bool createParent = 5; - public static final int CREATEPARENT_FIELD_NUMBER = 5; - private boolean createParent_; - public boolean hasCreateParent() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - public boolean getCreateParent() { - return createParent_; - } - - // required uint32 replication = 6; - public static final int REPLICATION_FIELD_NUMBER = 6; - private int replication_; - public boolean hasReplication() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - public int getReplication() { - return replication_; - } - - // required uint64 blockSize = 7; - public static final int BLOCKSIZE_FIELD_NUMBER = 7; - private long blockSize_; - public boolean hasBlockSize() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - public long getBlockSize() { - return blockSize_; - } - - private void initFields() { - src_ = ""; - masked_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); - clientName_ = ""; - createFlag_ = 0; - createParent_ = false; - replication_ = 0; - blockSize_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasSrc()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasMasked()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasClientName()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasCreateFlag()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasCreateParent()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasReplication()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasBlockSize()) { - memoizedIsInitialized = 0; - return false; - } - if (!getMasked().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, masked_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, getClientNameBytes()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeUInt32(4, createFlag_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeBool(5, createParent_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeUInt32(6, replication_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - output.writeUInt64(7, blockSize_); - } - getUnknownFields().writeTo(output); - } - - 
private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, masked_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getClientNameBytes()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(4, createFlag_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(5, createParent_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(6, replication_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(7, blockSize_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto) obj; - - boolean result = true; - result = result && (hasSrc() == other.hasSrc()); - if (hasSrc()) { - result = result && getSrc() - .equals(other.getSrc()); - } - result = result && (hasMasked() == other.hasMasked()); - if (hasMasked()) { - result = result && getMasked() - .equals(other.getMasked()); - } - result = result && (hasClientName() == other.hasClientName()); - if (hasClientName()) { - result = result && getClientName() - .equals(other.getClientName()); - } - result = result && (hasCreateFlag() == other.hasCreateFlag()); - if (hasCreateFlag()) { - result = result && (getCreateFlag() - == other.getCreateFlag()); - } - result = result && (hasCreateParent() == other.hasCreateParent()); - if (hasCreateParent()) { - result = result && (getCreateParent() - == other.getCreateParent()); - } - result = result && (hasReplication() == other.hasReplication()); - if (hasReplication()) { - result = result && (getReplication() - == other.getReplication()); - } - result = result && (hasBlockSize() == other.hasBlockSize()); - if (hasBlockSize()) { - result = result && (getBlockSize() - == other.getBlockSize()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasSrc()) { - hash = (37 * hash) + SRC_FIELD_NUMBER; - hash = (53 * hash) + getSrc().hashCode(); - } - if (hasMasked()) { - hash = (37 * hash) + MASKED_FIELD_NUMBER; - hash = (53 * hash) + getMasked().hashCode(); - } - if (hasClientName()) { - hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER; - hash = (53 * 
hash) + getClientName().hashCode(); - } - if (hasCreateFlag()) { - hash = (37 * hash) + CREATEFLAG_FIELD_NUMBER; - hash = (53 * hash) + getCreateFlag(); - } - if (hasCreateParent()) { - hash = (37 * hash) + CREATEPARENT_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getCreateParent()); - } - if (hasReplication()) { - hash = (37 * hash) + REPLICATION_FIELD_NUMBER; - hash = (53 * hash) + getReplication(); - } - if (hasBlockSize()) { - hash = (37 * hash) + BLOCKSIZE_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getBlockSize()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_CreateRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_CreateRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getMaskedFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - src_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - if (maskedBuilder_ == null) { - masked_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); - } else { - maskedBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - clientName_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - createFlag_ = 0; - bitField0_ = (bitField0_ & ~0x00000008); - createParent_ = false; - bitField0_ = (bitField0_ & ~0x00000010); - replication_ = 0; - bitField0_ = (bitField0_ & ~0x00000020); - blockSize_ = 0L; - bitField0_ = (bitField0_ & ~0x00000040); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.src_ = src_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - if (maskedBuilder_ == null) { - result.masked_ = masked_; - } else { - result.masked_ = maskedBuilder_.build(); - } - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.clientName_ = clientName_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.createFlag_ = createFlag_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.createParent_ = createParent_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; - } - result.replication_ = replication_; - if (((from_bitField0_ & 0x00000040) == 0x00000040)) { - to_bitField0_ |= 0x00000040; - } - result.blockSize_ = blockSize_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto.getDefaultInstance()) return this; - if (other.hasSrc()) { - setSrc(other.getSrc()); - } - if (other.hasMasked()) { - mergeMasked(other.getMasked()); - } - if (other.hasClientName()) { - setClientName(other.getClientName()); - } - if (other.hasCreateFlag()) { - setCreateFlag(other.getCreateFlag()); - } - if (other.hasCreateParent()) { - setCreateParent(other.getCreateParent()); - } - if (other.hasReplication()) { - setReplication(other.getReplication()); - } - if (other.hasBlockSize()) { - setBlockSize(other.getBlockSize()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasSrc()) { - - return false; - } - if (!hasMasked()) { - - return false; - } - if (!hasClientName()) { - - return false; - } - if (!hasCreateFlag()) { - - return false; - } - if (!hasCreateParent()) { - - return false; - } - if (!hasReplication()) { - - return false; - } - if (!hasBlockSize()) { - - return false; - } - if (!getMasked().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - 
com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - src_ = input.readBytes(); - break; - } - case 18: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.newBuilder(); - if (hasMasked()) { - subBuilder.mergeFrom(getMasked()); - } - input.readMessage(subBuilder, extensionRegistry); - setMasked(subBuilder.buildPartial()); - break; - } - case 26: { - bitField0_ |= 0x00000004; - clientName_ = input.readBytes(); - break; - } - case 32: { - bitField0_ |= 0x00000008; - createFlag_ = input.readUInt32(); - break; - } - case 40: { - bitField0_ |= 0x00000010; - createParent_ = input.readBool(); - break; - } - case 48: { - bitField0_ |= 0x00000020; - replication_ = input.readUInt32(); - break; - } - case 56: { - bitField0_ |= 0x00000040; - blockSize_ = input.readUInt64(); - break; - } - } - } - } - - private int bitField0_; - - // required string src = 1; - private java.lang.Object src_ = ""; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - src_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setSrc(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - return this; - } - public Builder clearSrc() { - bitField0_ = (bitField0_ & ~0x00000001); - src_ = getDefaultInstance().getSrc(); - onChanged(); - return this; - } - void setSrc(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - } - - // required .FsPermissionProto masked = 2; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto masked_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder> maskedBuilder_; - public boolean hasMasked() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getMasked() { - if (maskedBuilder_ == null) { - return masked_; - } else { - return maskedBuilder_.getMessage(); - } - } - public Builder setMasked(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto value) { - if (maskedBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - masked_ = value; - onChanged(); - } else { - maskedBuilder_.setMessage(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder setMasked( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder builderForValue) { - if (maskedBuilder_ == null) { - masked_ = builderForValue.build(); - onChanged(); - } else { - 
maskedBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder mergeMasked(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto value) { - if (maskedBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && - masked_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance()) { - masked_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.newBuilder(masked_).mergeFrom(value).buildPartial(); - } else { - masked_ = value; - } - onChanged(); - } else { - maskedBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder clearMasked() { - if (maskedBuilder_ == null) { - masked_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); - onChanged(); - } else { - maskedBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder getMaskedBuilder() { - bitField0_ |= 0x00000002; - onChanged(); - return getMaskedFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getMaskedOrBuilder() { - if (maskedBuilder_ != null) { - return maskedBuilder_.getMessageOrBuilder(); - } else { - return masked_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder> - getMaskedFieldBuilder() { - if (maskedBuilder_ == null) { - maskedBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder>( - masked_, - getParentForChildren(), - isClean()); - masked_ = null; - } - return maskedBuilder_; - } - - // required string clientName = 3; - private java.lang.Object clientName_ = ""; - public boolean hasClientName() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public String getClientName() { - java.lang.Object ref = clientName_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - clientName_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setClientName(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - clientName_ = value; - onChanged(); - return this; - } - public Builder clearClientName() { - bitField0_ = (bitField0_ & ~0x00000004); - clientName_ = getDefaultInstance().getClientName(); - onChanged(); - return this; - } - void setClientName(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000004; - clientName_ = value; - onChanged(); - } - - // required uint32 createFlag = 4; - private int createFlag_ ; - public boolean hasCreateFlag() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public int getCreateFlag() { - return createFlag_; - } - public Builder setCreateFlag(int value) { - bitField0_ |= 0x00000008; - createFlag_ = value; - onChanged(); - return this; - } - public Builder clearCreateFlag() { - bitField0_ = (bitField0_ & ~0x00000008); - createFlag_ = 0; - onChanged(); - return this; - } - - // required bool createParent = 5; - private boolean 
createParent_ ; - public boolean hasCreateParent() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - public boolean getCreateParent() { - return createParent_; - } - public Builder setCreateParent(boolean value) { - bitField0_ |= 0x00000010; - createParent_ = value; - onChanged(); - return this; - } - public Builder clearCreateParent() { - bitField0_ = (bitField0_ & ~0x00000010); - createParent_ = false; - onChanged(); - return this; - } - - // required uint32 replication = 6; - private int replication_ ; - public boolean hasReplication() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - public int getReplication() { - return replication_; - } - public Builder setReplication(int value) { - bitField0_ |= 0x00000020; - replication_ = value; - onChanged(); - return this; - } - public Builder clearReplication() { - bitField0_ = (bitField0_ & ~0x00000020); - replication_ = 0; - onChanged(); - return this; - } - - // required uint64 blockSize = 7; - private long blockSize_ ; - public boolean hasBlockSize() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - public long getBlockSize() { - return blockSize_; - } - public Builder setBlockSize(long value) { - bitField0_ |= 0x00000040; - blockSize_ = value; - onChanged(); - return this; - } - public Builder clearBlockSize() { - bitField0_ = (bitField0_ & ~0x00000040); - blockSize_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:CreateRequestProto) - } - - static { - defaultInstance = new CreateRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:CreateRequestProto) - } - - public interface CreateResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class CreateResponseProto extends - com.google.protobuf.GeneratedMessage - implements CreateResponseProtoOrBuilder { - // Use CreateResponseProto.newBuilder() to construct. 
- private CreateResponseProto(Builder builder) { - super(builder); - } - private CreateResponseProto(boolean noInit) {} - - private static final CreateResponseProto defaultInstance; - public static CreateResponseProto getDefaultInstance() { - return defaultInstance; - } - - public CreateResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_CreateResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_CreateResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_CreateResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_CreateResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void 
maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:CreateResponseProto) - } - - static { - defaultInstance = new CreateResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:CreateResponseProto) - } - - public interface 
AppendRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string src = 1; - boolean hasSrc(); - String getSrc(); - - // required string clientName = 2; - boolean hasClientName(); - String getClientName(); - } - public static final class AppendRequestProto extends - com.google.protobuf.GeneratedMessage - implements AppendRequestProtoOrBuilder { - // Use AppendRequestProto.newBuilder() to construct. - private AppendRequestProto(Builder builder) { - super(builder); - } - private AppendRequestProto(boolean noInit) {} - - private static final AppendRequestProto defaultInstance; - public static AppendRequestProto getDefaultInstance() { - return defaultInstance; - } - - public AppendRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_AppendRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_AppendRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string src = 1; - public static final int SRC_FIELD_NUMBER = 1; - private java.lang.Object src_; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - src_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getSrcBytes() { - java.lang.Object ref = src_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - src_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string clientName = 2; - public static final int CLIENTNAME_FIELD_NUMBER = 2; - private java.lang.Object clientName_; - public boolean hasClientName() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getClientName() { - java.lang.Object ref = clientName_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - clientName_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getClientNameBytes() { - java.lang.Object ref = clientName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - clientName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - src_ = ""; - clientName_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasSrc()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasClientName()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - 
getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getClientNameBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getClientNameBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto) obj; - - boolean result = true; - result = result && (hasSrc() == other.hasSrc()); - if (hasSrc()) { - result = result && getSrc() - .equals(other.getSrc()); - } - result = result && (hasClientName() == other.hasClientName()); - if (hasClientName()) { - result = result && getClientName() - .equals(other.getClientName()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasSrc()) { - hash = (37 * hash) + SRC_FIELD_NUMBER; - hash = (53 * hash) + getSrc().hashCode(); - } - if (hasClientName()) { - hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER; - hash = (53 * hash) + getClientName().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_AppendRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_AppendRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder 
clear() { - super.clear(); - src_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - clientName_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.src_ = src_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.clientName_ = clientName_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto.getDefaultInstance()) return this; - if (other.hasSrc()) { - setSrc(other.getSrc()); - } - if (other.hasClientName()) { - setClientName(other.getClientName()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasSrc()) { - - return false; - } - if (!hasClientName()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; 
- default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - src_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - clientName_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required string src = 1; - private java.lang.Object src_ = ""; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - src_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setSrc(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - return this; - } - public Builder clearSrc() { - bitField0_ = (bitField0_ & ~0x00000001); - src_ = getDefaultInstance().getSrc(); - onChanged(); - return this; - } - void setSrc(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - } - - // required string clientName = 2; - private java.lang.Object clientName_ = ""; - public boolean hasClientName() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getClientName() { - java.lang.Object ref = clientName_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - clientName_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setClientName(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - clientName_ = value; - onChanged(); - return this; - } - public Builder clearClientName() { - bitField0_ = (bitField0_ & ~0x00000002); - clientName_ = getDefaultInstance().getClientName(); - onChanged(); - return this; - } - void setClientName(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000002; - clientName_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:AppendRequestProto) - } - - static { - defaultInstance = new AppendRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:AppendRequestProto) - } - - public interface AppendResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .LocatedBlockProto block = 1; - boolean hasBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder(); - } - public static final class AppendResponseProto extends - com.google.protobuf.GeneratedMessage - implements AppendResponseProtoOrBuilder { - // Use AppendResponseProto.newBuilder() to construct. 
- private AppendResponseProto(Builder builder) { - super(builder); - } - private AppendResponseProto(boolean noInit) {} - - private static final AppendResponseProto defaultInstance; - public static AppendResponseProto getDefaultInstance() { - return defaultInstance; - } - - public AppendResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_AppendResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_AppendResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required .LocatedBlockProto block = 1; - public static final int BLOCK_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto block_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock() { - return block_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder() { - return block_; - } - - private void initFields() { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBlock()) { - memoizedIsInitialized = 0; - return false; - } - if (!getBlock().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, block_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, block_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto) obj; - - boolean result = true; - result = result && (hasBlock() == other.hasBlock()); - if (hasBlock()) { - result = result && getBlock() - .equals(other.getBlock()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + 
getDescriptorForType().hashCode(); - if (hasBlock()) { - hash = (37 * hash) + BLOCK_FIELD_NUMBER; - hash = (53 * hash) + getBlock().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto prototype) { 
- return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_AppendResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_AppendResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBlockFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (blockBuilder_ == null) { - result.block_ = block_; - } 
else { - result.block_ = blockBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.getDefaultInstance()) return this; - if (other.hasBlock()) { - mergeBlock(other.getBlock()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBlock()) { - - return false; - } - if (!getBlock().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder(); - if (hasBlock()) { - subBuilder.mergeFrom(getBlock()); - } - input.readMessage(subBuilder, extensionRegistry); - setBlock(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .LocatedBlockProto block = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> blockBuilder_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock() { - if (blockBuilder_ == null) { - return block_; - } else { - return blockBuilder_.getMessage(); - } - } - public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { - if (blockBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - block_ = value; - onChanged(); - } else { - blockBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setBlock( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { - if (blockBuilder_ == null) { - block_ = builderForValue.build(); - onChanged(); - } else { - blockBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder 
mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { - if (blockBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) { - block_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); - } else { - block_ = value; - } - onChanged(); - } else { - blockBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearBlock() { - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); - onChanged(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getBlockBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getBlockFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder() { - if (blockBuilder_ != null) { - return blockBuilder_.getMessageOrBuilder(); - } else { - return block_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> - getBlockFieldBuilder() { - if (blockBuilder_ == null) { - blockBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>( - block_, - getParentForChildren(), - isClean()); - block_ = null; - } - return blockBuilder_; - } - - // @@protoc_insertion_point(builder_scope:AppendResponseProto) - } - - static { - defaultInstance = new AppendResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:AppendResponseProto) - } - - public interface SetReplicationRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string src = 1; - boolean hasSrc(); - String getSrc(); - - // required uint32 replication = 2; - boolean hasReplication(); - int getReplication(); - } - public static final class SetReplicationRequestProto extends - com.google.protobuf.GeneratedMessage - implements SetReplicationRequestProtoOrBuilder { - // Use SetReplicationRequestProto.newBuilder() to construct. 
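
The mergeBlock() implementation above shows the merge rule for a singular message field: an already-present, non-default block is combined field by field via LocatedBlockProto.newBuilder(block_).mergeFrom(value); anything else is replaced outright. A short sketch of that behavior, relying only on calls visible in this hunk (LocatedBlockProto's own fields are not shown):

// Editorial sketch, not part of this patch: merge semantics of the
// singular 'block' field on AppendResponseProto.Builder.
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;

public class MergeBlockDemo {
  public static void main(String[] args) {
    AppendResponseProto.Builder b = AppendResponseProto.newBuilder();
    b.mergeBlock(LocatedBlockProto.getDefaultInstance()); // nothing set yet, so the value is adopted as-is
    // A repeat merge takes the field-by-field path, newBuilder(existing).mergeFrom(value),
    // only when the existing block is non-default; the identity check in
    // mergeBlock() above otherwise just replaces it again.
    b.mergeBlock(LocatedBlockProto.getDefaultInstance());
    System.out.println(b.hasBlock()); // true: presence bit 0x00000001 is set
    // b.build() would throw here: LocatedBlockProto's own required fields
    // (not shown in this hunk) are still unset, and isInitialized() recurses.
  }
}
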
- private SetReplicationRequestProto(Builder builder) { - super(builder); - } - private SetReplicationRequestProto(boolean noInit) {} - - private static final SetReplicationRequestProto defaultInstance; - public static SetReplicationRequestProto getDefaultInstance() { - return defaultInstance; - } - - public SetReplicationRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetReplicationRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetReplicationRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string src = 1; - public static final int SRC_FIELD_NUMBER = 1; - private java.lang.Object src_; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - src_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getSrcBytes() { - java.lang.Object ref = src_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - src_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required uint32 replication = 2; - public static final int REPLICATION_FIELD_NUMBER = 2; - private int replication_; - public boolean hasReplication() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public int getReplication() { - return replication_; - } - - private void initFields() { - src_ = ""; - replication_ = 0; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasSrc()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasReplication()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt32(2, replication_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(2, replication_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - 
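
The writeTo() above emits src with writeBytes(1, ...) and replication with writeUInt32(2, ...), and the Builder's parsing loop further down matches the corresponding tags 10 and 16. The connection is the protobuf wire format: a tag is (fieldNumber << 3) | wireType, with wire type 2 for length-delimited fields and 0 for varints. A tiny sketch of that arithmetic (illustrative, not part of this patch):

// Why the parser below switches on tags 10 and 16 for fields declared 1 and 2.
public class TagMath {
  public static void main(String[] args) {
    int srcTag = (1 << 3) | 2;          // field 1, length-delimited -> 10 ("case 10")
    int replicationTag = (2 << 3) | 0;  // field 2, varint           -> 16 ("case 16")
    System.out.println(srcTag + " " + replicationTag);
  }
}
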
@java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto) obj; - - boolean result = true; - result = result && (hasSrc() == other.hasSrc()); - if (hasSrc()) { - result = result && getSrc() - .equals(other.getSrc()); - } - result = result && (hasReplication() == other.hasReplication()); - if (hasReplication()) { - result = result && (getReplication() - == other.getReplication()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasSrc()) { - hash = (37 * hash) + SRC_FIELD_NUMBER; - hash = (53 * hash) + getSrc().hashCode(); - } - if (hasReplication()) { - hash = (37 * hash) + REPLICATION_FIELD_NUMBER; - hash = (53 * hash) + getReplication(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto 
parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetReplicationRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetReplicationRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - src_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - replication_ = 0; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto build() { - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.src_ = src_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.replication_ = replication_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto.getDefaultInstance()) return this; - if (other.hasSrc()) { - setSrc(other.getSrc()); - } - if (other.hasReplication()) { - setReplication(other.getReplication()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasSrc()) { - - return false; - } - if (!hasReplication()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - src_ = input.readBytes(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - replication_ = input.readUInt32(); - break; - } - } - } - } - - private int bitField0_; - - // required string src = 1; - private java.lang.Object src_ = ""; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); 
- src_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setSrc(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - return this; - } - public Builder clearSrc() { - bitField0_ = (bitField0_ & ~0x00000001); - src_ = getDefaultInstance().getSrc(); - onChanged(); - return this; - } - void setSrc(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - } - - // required uint32 replication = 2; - private int replication_ ; - public boolean hasReplication() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public int getReplication() { - return replication_; - } - public Builder setReplication(int value) { - bitField0_ |= 0x00000002; - replication_ = value; - onChanged(); - return this; - } - public Builder clearReplication() { - bitField0_ = (bitField0_ & ~0x00000002); - replication_ = 0; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:SetReplicationRequestProto) - } - - static { - defaultInstance = new SetReplicationRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:SetReplicationRequestProto) - } - - public interface SetReplicationResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required bool result = 1; - boolean hasResult(); - boolean getResult(); - } - public static final class SetReplicationResponseProto extends - com.google.protobuf.GeneratedMessage - implements SetReplicationResponseProtoOrBuilder { - // Use SetReplicationResponseProto.newBuilder() to construct. - private SetReplicationResponseProto(Builder builder) { - super(builder); - } - private SetReplicationResponseProto(boolean noInit) {} - - private static final SetReplicationResponseProto defaultInstance; - public static SetReplicationResponseProto getDefaultInstance() { - return defaultInstance; - } - - public SetReplicationResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetReplicationResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetReplicationResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required bool result = 1; - public static final int RESULT_FIELD_NUMBER = 1; - private boolean result_; - public boolean hasResult() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public boolean getResult() { - return result_; - } - - private void initFields() { - result_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasResult()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(1, result_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if 
(size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(1, result_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto) obj; - - boolean result = true; - result = result && (hasResult() == other.hasResult()); - if (hasResult()) { - result = result && (getResult() - == other.getResult()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasResult()) { - hash = (37 * hash) + RESULT_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getResult()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = 
newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetReplicationResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetReplicationResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - result_ = false; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.getDefaultInstance(); - } - 
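
Taken together, SetReplicationRequestProto and SetReplicationResponseProto (whose Builder continues below) form the request/response message pair for the setReplication call; the RPC plumbing that carries them between client and namenode is outside this hunk. A hedged sketch of the two messages as plain objects, with invented values:

// Editorial sketch, not part of this patch: the setReplication message pair.
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto;

public class SetReplicationDemo {
  public static void main(String[] args) throws Exception {
    SetReplicationRequestProto req = SetReplicationRequestProto.newBuilder()
        .setSrc("/tmp/example.txt")  // hypothetical path
        .setReplication(3)           // required uint32 replication = 2
        .build();
    // What a responding side might construct; 'true' is illustrative only.
    SetReplicationResponseProto resp = SetReplicationResponseProto.newBuilder()
        .setResult(true)
        .build();
    System.out.println(req.getReplication() + " -> " + resp.getResult());
  }
}
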
- public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.result_ = result_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.getDefaultInstance()) return this; - if (other.hasResult()) { - setResult(other.getResult()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasResult()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - result_ = input.readBool(); - break; - } - } - } - } - - private int bitField0_; - - // required bool result = 1; - private boolean result_ ; - public boolean hasResult() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public boolean getResult() { - return result_; - } - public Builder setResult(boolean value) { - bitField0_ |= 0x00000001; - result_ = value; - onChanged(); - return this; - } - public Builder clearResult() { - bitField0_ = (bitField0_ & ~0x00000001); - result_ = false; - onChanged(); - return this; - } - - // 
@@protoc_insertion_point(builder_scope:SetReplicationResponseProto) - } - - static { - defaultInstance = new SetReplicationResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:SetReplicationResponseProto) - } - - public interface SetPermissionRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string src = 1; - boolean hasSrc(); - String getSrc(); - - // required .FsPermissionProto permission = 2; - boolean hasPermission(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getPermission(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder(); - } - public static final class SetPermissionRequestProto extends - com.google.protobuf.GeneratedMessage - implements SetPermissionRequestProtoOrBuilder { - // Use SetPermissionRequestProto.newBuilder() to construct. - private SetPermissionRequestProto(Builder builder) { - super(builder); - } - private SetPermissionRequestProto(boolean noInit) {} - - private static final SetPermissionRequestProto defaultInstance; - public static SetPermissionRequestProto getDefaultInstance() { - return defaultInstance; - } - - public SetPermissionRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetPermissionRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetPermissionRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string src = 1; - public static final int SRC_FIELD_NUMBER = 1; - private java.lang.Object src_; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - src_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getSrcBytes() { - java.lang.Object ref = src_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - src_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required .FsPermissionProto permission = 2; - public static final int PERMISSION_FIELD_NUMBER = 2; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto permission_; - public boolean hasPermission() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getPermission() { - return permission_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() { - return permission_; - } - - private void initFields() { - src_ = ""; - permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasSrc()) { - 
memoizedIsInitialized = 0; - return false; - } - if (!hasPermission()) { - memoizedIsInitialized = 0; - return false; - } - if (!getPermission().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, permission_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, permission_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto) obj; - - boolean result = true; - result = result && (hasSrc() == other.hasSrc()); - if (hasSrc()) { - result = result && getSrc() - .equals(other.getSrc()); - } - result = result && (hasPermission() == other.hasPermission()); - if (hasPermission()) { - result = result && getPermission() - .equals(other.getPermission()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasSrc()) { - hash = (37 * hash) + SRC_FIELD_NUMBER; - hash = (53 * hash) + getSrc().hashCode(); - } - if (hasPermission()) { - hash = (37 * hash) + PERMISSION_FIELD_NUMBER; - hash = (53 * hash) + getPermission().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return 
newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetPermissionRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetPermissionRequestProto_fieldAccessorTable; - } - - // Construct using 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getPermissionFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - src_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - if (permissionBuilder_ == null) { - permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); - } else { - permissionBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.src_ = src_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - if (permissionBuilder_ == null) { - result.permission_ = permission_; - } else { - result.permission_ = permissionBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto other) { - if (other == 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto.getDefaultInstance()) return this; - if (other.hasSrc()) { - setSrc(other.getSrc()); - } - if (other.hasPermission()) { - mergePermission(other.getPermission()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasSrc()) { - - return false; - } - if (!hasPermission()) { - - return false; - } - if (!getPermission().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - src_ = input.readBytes(); - break; - } - case 18: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.newBuilder(); - if (hasPermission()) { - subBuilder.mergeFrom(getPermission()); - } - input.readMessage(subBuilder, extensionRegistry); - setPermission(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required string src = 1; - private java.lang.Object src_ = ""; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - src_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setSrc(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - return this; - } - public Builder clearSrc() { - bitField0_ = (bitField0_ & ~0x00000001); - src_ = getDefaultInstance().getSrc(); - onChanged(); - return this; - } - void setSrc(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - } - - // required .FsPermissionProto permission = 2; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder> permissionBuilder_; - public boolean hasPermission() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getPermission() { - if (permissionBuilder_ == null) { - return permission_; - } else { - return permissionBuilder_.getMessage(); - } - } - public Builder setPermission(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto value) { - if (permissionBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - permission_ = 
value; - onChanged(); - } else { - permissionBuilder_.setMessage(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder setPermission( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder builderForValue) { - if (permissionBuilder_ == null) { - permission_ = builderForValue.build(); - onChanged(); - } else { - permissionBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder mergePermission(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto value) { - if (permissionBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && - permission_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance()) { - permission_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.newBuilder(permission_).mergeFrom(value).buildPartial(); - } else { - permission_ = value; - } - onChanged(); - } else { - permissionBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder clearPermission() { - if (permissionBuilder_ == null) { - permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); - onChanged(); - } else { - permissionBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder getPermissionBuilder() { - bitField0_ |= 0x00000002; - onChanged(); - return getPermissionFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() { - if (permissionBuilder_ != null) { - return permissionBuilder_.getMessageOrBuilder(); - } else { - return permission_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder> - getPermissionFieldBuilder() { - if (permissionBuilder_ == null) { - permissionBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder>( - permission_, - getParentForChildren(), - isClean()); - permission_ = null; - } - return permissionBuilder_; - } - - // @@protoc_insertion_point(builder_scope:SetPermissionRequestProto) - } - - static { - defaultInstance = new SetPermissionRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:SetPermissionRequestProto) - } - - public interface SetPermissionResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class SetPermissionResponseProto extends - com.google.protobuf.GeneratedMessage - implements SetPermissionResponseProtoOrBuilder { - // Use SetPermissionResponseProto.newBuilder() to construct. 
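[Editorial note on the hunk above] The SetPermissionRequestProto code deleted above implies the message shape "required string src = 1; required .FsPermissionProto permission = 2". As a minimal usage sketch of the generated API, assuming the protobuf-2.4-era runtime these sources target (the path, the class name SetPermissionRequestSketch, the 0644 value, and the FsPermissionProto field name "perm" are illustrative assumptions, not taken from this patch):

    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto;

    public class SetPermissionRequestSketch {
      public static void main(String[] args) throws Exception {
        // Both fields are required; build() throws if either is unset.
        FsPermissionProto perm = FsPermissionProto.newBuilder()
            .setPerm(0644)            // assumed: required uint32 perm in hdfs.proto
            .build();
        SetPermissionRequestProto req = SetPermissionRequestProto.newBuilder()
            .setSrc("/user/example")  // required string src = 1 (invented path)
            .setPermission(perm)      // required .FsPermissionProto permission = 2
            .build();
        // Round-trip through the wire format, exercising the parseFrom
        // overloads generated above.
        SetPermissionRequestProto parsed =
            SetPermissionRequestProto.parseFrom(req.toByteArray());
        System.out.println(parsed.getSrc()); // prints /user/example
      }
    }
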
- private SetPermissionResponseProto(Builder builder) { - super(builder); - } - private SetPermissionResponseProto(boolean noInit) {} - - private static final SetPermissionResponseProto defaultInstance; - public static SetPermissionResponseProto getDefaultInstance() { - return defaultInstance; - } - - public SetPermissionResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetPermissionResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetPermissionResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetPermissionResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetPermissionResponseProto_fieldAccessorTable; - } - - // Construct using 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, 
tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:SetPermissionResponseProto) - } - - static { - defaultInstance = new SetPermissionResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:SetPermissionResponseProto) - } - - public interface SetOwnerRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string src = 1; - boolean hasSrc(); - String getSrc(); - - // required string username = 2; - boolean hasUsername(); - String getUsername(); - - // required string groupname = 3; - boolean hasGroupname(); - String getGroupname(); - } - public static final class SetOwnerRequestProto extends - com.google.protobuf.GeneratedMessage - implements SetOwnerRequestProtoOrBuilder { - // Use SetOwnerRequestProto.newBuilder() to construct. - private SetOwnerRequestProto(Builder builder) { - super(builder); - } - private SetOwnerRequestProto(boolean noInit) {} - - private static final SetOwnerRequestProto defaultInstance; - public static SetOwnerRequestProto getDefaultInstance() { - return defaultInstance; - } - - public SetOwnerRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetOwnerRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetOwnerRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string src = 1; - public static final int SRC_FIELD_NUMBER = 1; - private java.lang.Object src_; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - src_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getSrcBytes() { - java.lang.Object ref = src_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - src_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string username = 2; - public static final int USERNAME_FIELD_NUMBER = 2; - private java.lang.Object username_; - public boolean hasUsername() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getUsername() { - java.lang.Object ref = username_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - username_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getUsernameBytes() { - java.lang.Object ref = username_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - username_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string groupname = 3; - public static final int 
GROUPNAME_FIELD_NUMBER = 3; - private java.lang.Object groupname_; - public boolean hasGroupname() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public String getGroupname() { - java.lang.Object ref = groupname_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - groupname_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getGroupnameBytes() { - java.lang.Object ref = groupname_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - groupname_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - src_ = ""; - username_ = ""; - groupname_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasSrc()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasUsername()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasGroupname()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getUsernameBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, getGroupnameBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getUsernameBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getGroupnameBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto) obj; - - boolean result = true; - result = result && (hasSrc() == other.hasSrc()); - if (hasSrc()) { - result = result && getSrc() - .equals(other.getSrc()); - } - result = result && (hasUsername() == other.hasUsername()); - if (hasUsername()) { - result = result && getUsername() - .equals(other.getUsername()); - } - result = result && (hasGroupname() == other.hasGroupname()); - if 
(hasGroupname()) { - result = result && getGroupname() - .equals(other.getGroupname()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasSrc()) { - hash = (37 * hash) + SRC_FIELD_NUMBER; - hash = (53 * hash) + getSrc().hashCode(); - } - if (hasUsername()) { - hash = (37 * hash) + USERNAME_FIELD_NUMBER; - hash = (53 * hash) + getUsername().hashCode(); - } - if (hasGroupname()) { - hash = (37 * hash) + GROUPNAME_FIELD_NUMBER; - hash = (53 * hash) + getGroupname().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetOwnerRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetOwnerRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - src_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - username_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - groupname_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return 
result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.src_ = src_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.username_ = username_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.groupname_ = groupname_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto.getDefaultInstance()) return this; - if (other.hasSrc()) { - setSrc(other.getSrc()); - } - if (other.hasUsername()) { - setUsername(other.getUsername()); - } - if (other.hasGroupname()) { - setGroupname(other.getGroupname()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasSrc()) { - - return false; - } - if (!hasUsername()) { - - return false; - } - if (!hasGroupname()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - src_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - username_ = input.readBytes(); - break; - } - case 26: { - bitField0_ |= 0x00000004; - groupname_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required string src = 1; - private java.lang.Object src_ = ""; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - src_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setSrc(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - return this; - } - public Builder clearSrc() { - bitField0_ = (bitField0_ & ~0x00000001); - src_ = getDefaultInstance().getSrc(); - onChanged(); - return this; - } - 
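// [Editorial gloss, not generated output] The setter/clearer pair above shows
// the presence-tracking idiom every builder in this file repeats: each field
// owns one bit of bitField0_. setSrc(...) ORs in 0x00000001 to mark the field
// present, clearSrc() ANDs with ~0x00000001 to mark it absent, hasSrc() tests
// the bit, and isInitialized() refuses to build() while any required field's
// bit is still clear. The package-private setSrc(ByteString) just below lets
// protobuf internals store the raw wire bytes without an eager UTF-8 decode;
// getSrc() then converts to String and caches the result lazily.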
void setSrc(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - } - - // required string username = 2; - private java.lang.Object username_ = ""; - public boolean hasUsername() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getUsername() { - java.lang.Object ref = username_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - username_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setUsername(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - username_ = value; - onChanged(); - return this; - } - public Builder clearUsername() { - bitField0_ = (bitField0_ & ~0x00000002); - username_ = getDefaultInstance().getUsername(); - onChanged(); - return this; - } - void setUsername(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000002; - username_ = value; - onChanged(); - } - - // required string groupname = 3; - private java.lang.Object groupname_ = ""; - public boolean hasGroupname() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public String getGroupname() { - java.lang.Object ref = groupname_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - groupname_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setGroupname(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - groupname_ = value; - onChanged(); - return this; - } - public Builder clearGroupname() { - bitField0_ = (bitField0_ & ~0x00000004); - groupname_ = getDefaultInstance().getGroupname(); - onChanged(); - return this; - } - void setGroupname(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000004; - groupname_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:SetOwnerRequestProto) - } - - static { - defaultInstance = new SetOwnerRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:SetOwnerRequestProto) - } - - public interface SetOwnerResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class SetOwnerResponseProto extends - com.google.protobuf.GeneratedMessage - implements SetOwnerResponseProtoOrBuilder { - // Use SetOwnerResponseProto.newBuilder() to construct. 
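[Editorial note on the hunk above] The SetOwnerRequestProto code that closes above carries three required strings (src = 1, username = 2, groupname = 3); the tags 10, 18, and 26 in its parsing loop are (field_number << 3) | wire_type for fields 1 through 3 with wire type 2, length-delimited. A hedged sketch of driving this API, including the varint-length framing behind the generated parseDelimitedFrom overloads; the class name and all field values are invented for illustration:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto;

    public class SetOwnerRequestSketch {
      public static void main(String[] args) throws Exception {
        SetOwnerRequestProto req = SetOwnerRequestProto.newBuilder()
            .setSrc("/user/example")  // required string src = 1
            .setUsername("alice")     // required string username = 2
            .setGroupname("staff")    // required string groupname = 3
            .build();
        // writeDelimitedTo prefixes the message with a varint length, the
        // framing parseDelimitedFrom above expects; per the generated code,
        // parseDelimitedFrom returns null on clean end-of-stream.
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        req.writeDelimitedTo(out);
        SetOwnerRequestProto parsed = SetOwnerRequestProto.parseDelimitedFrom(
            new ByteArrayInputStream(out.toByteArray()));
        System.out.println(parsed.getUsername() + ":" + parsed.getGroupname());
      }
    }
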
- private SetOwnerResponseProto(Builder builder) { - super(builder); - } - private SetOwnerResponseProto(boolean noInit) {} - - private static final SetOwnerResponseProto defaultInstance; - public static SetOwnerResponseProto getDefaultInstance() { - return defaultInstance; - } - - public SetOwnerResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetOwnerResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetOwnerResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto parseFrom( - byte[] data, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetOwnerResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetOwnerResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - 
maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:SetOwnerResponseProto) - } - - static { - defaultInstance = new SetOwnerResponseProto(true); - defaultInstance.initFields(); - } - - // 
@@protoc_insertion_point(class_scope:SetOwnerResponseProto) - } - - public interface AbandonBlockRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .ExtendedBlockProto b = 1; - boolean hasB(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder(); - - // required string src = 2; - boolean hasSrc(); - String getSrc(); - - // required string holder = 3; - boolean hasHolder(); - String getHolder(); - } - public static final class AbandonBlockRequestProto extends - com.google.protobuf.GeneratedMessage - implements AbandonBlockRequestProtoOrBuilder { - // Use AbandonBlockRequestProto.newBuilder() to construct. - private AbandonBlockRequestProto(Builder builder) { - super(builder); - } - private AbandonBlockRequestProto(boolean noInit) {} - - private static final AbandonBlockRequestProto defaultInstance; - public static AbandonBlockRequestProto getDefaultInstance() { - return defaultInstance; - } - - public AbandonBlockRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_AbandonBlockRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_AbandonBlockRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required .ExtendedBlockProto b = 1; - public static final int B_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto b_; - public boolean hasB() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB() { - return b_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder() { - return b_; - } - - // required string src = 2; - public static final int SRC_FIELD_NUMBER = 2; - private java.lang.Object src_; - public boolean hasSrc() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getSrc() { - java.lang.Object ref = src_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - src_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getSrcBytes() { - java.lang.Object ref = src_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - src_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string holder = 3; - public static final int HOLDER_FIELD_NUMBER = 3; - private java.lang.Object holder_; - public boolean hasHolder() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public String getHolder() { - java.lang.Object ref = holder_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - holder_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getHolderBytes() { - 
[... deleted generated code elided. The remainder of AbandonBlockRequestProto follows the stock protobuf-java (2.4-era) shape: a getHolderBytes() accessor that caches the UTF-8 encoding of holder_, initFields() defaults, an isInitialized() that checks the required fields b, src and holder (and that b is itself initialized), writeTo()/getSerializedSize() wire serialization keyed off bitField0_, equals()/hashCode(), the full battery of static parseFrom()/parseDelimitedFrom() overloads (each funnelling into newBuilder().mergeFrom(...).buildParsed()), and a nested Builder exposing setB/mergeB/clearB through a SingleFieldBuilder plus plain set/clear accessors for src and holder. The empty AbandonBlockResponseProto message follows with the same scaffolding and no fields of its own ...]
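For orientation, these are the two messages this stretch of deleted code implements, reconstructed from the field comments embedded in the generated source (the authoritative definitions live in the .proto files the patch keeps, presumably ClientNamenodeProtocol.proto); the inline comments below are inferred from the field names, not stated in this hunk:

    message AbandonBlockRequestProto {
      required ExtendedBlockProto b = 1;   // the block being abandoned
      required string src = 2;             // path of the file under construction
      required string holder = 3;          // lease holder (client name)
    }

    // Void response: no fields, so the generated class is pure scaffolding.
    message AbandonBlockResponseProto {
    }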
[... deleted generated code elided. The hunk closes out AbandonBlockResponseProto (a trivial mergeFrom, an isInitialized() that is always true, and a tag-dispatch mergeFrom(CodedInputStream) loop), then deletes AddBlockRequestProto: its OrBuilder interface declares required string src = 1, required string clientName = 2, required .ExtendedBlockProto previous = 3 and repeated .DatanodeInfoProto excludeNodes = 4, and the message class carries the matching accessors, required-field initialization checks, serialization, equals()/hashCode(), the parseFrom() overloads, and a Builder that manages previous through a SingleFieldBuilder and excludeNodes through a RepeatedFieldBuilder (set/add/addAll/remove/clear plus per-index builder access) ...]
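Every deleted class here exposes the same builder/parse surface, so a minimal sketch of how a caller would have exercised it may help. This is illustrative only: the ExtendedBlockProto field names (poolId, blockId, generationStamp) are assumptions about the era's hdfs.proto that this hunk does not show, while the AddBlockRequestProto methods used are all visible in the deleted source above.

    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;

    public class AddBlockRequestRoundTrip {
      public static void main(String[] args) throws Exception {
        // 'previous' is itself a message with required fields; these setters are
        // assumed from hdfs.proto of this era, not shown in this hunk.
        ExtendedBlockProto previous = ExtendedBlockProto.newBuilder()
            .setPoolId("BP-1")
            .setBlockId(1L)
            .setGenerationStamp(1L)
            .build();

        AddBlockRequestProto req = AddBlockRequestProto.newBuilder()
            .setSrc("/user/foo/bar.txt")   // required string src = 1
            .setClientName("DFSClient_1")  // required string clientName = 2
            .setPrevious(previous)         // required .ExtendedBlockProto previous = 3
            .build();                      // throws if any required field is unset

        // Wire round-trip through the generated parseFrom(byte[]) overload.
        byte[] wire = req.toByteArray();
        AddBlockRequestProto parsed = AddBlockRequestProto.parseFrom(wire);
        assert parsed.equals(req);
      }
    }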
[... deleted generated code elided. AddBlockResponseProto wraps a single required .LocatedBlockProto block = 1: the generated class provides hasBlock()/getBlock() accessors, an isInitialized() that requires block to be present and itself initialized, writeTo()/getSerializedSize(), equals()/hashCode(), the usual parseFrom() overloads, and a Builder whose block field is again backed by a field builder; the Builder's build()/buildParsed() bodies continue past this excerpt ...]
newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (blockBuilder_ == null) { - result.block_ = block_; - } else { - result.block_ = blockBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto.getDefaultInstance()) return this; - if (other.hasBlock()) { - mergeBlock(other.getBlock()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBlock()) { - - return false; - } - if (!getBlock().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder(); - if (hasBlock()) { - subBuilder.mergeFrom(getBlock()); - } - input.readMessage(subBuilder, extensionRegistry); - setBlock(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .LocatedBlockProto block = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> blockBuilder_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock() { - if (blockBuilder_ == null) { - return block_; - } else { - return blockBuilder_.getMessage(); - } - } - public Builder 
setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { - if (blockBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - block_ = value; - onChanged(); - } else { - blockBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setBlock( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { - if (blockBuilder_ == null) { - block_ = builderForValue.build(); - onChanged(); - } else { - blockBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { - if (blockBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) { - block_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); - } else { - block_ = value; - } - onChanged(); - } else { - blockBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearBlock() { - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); - onChanged(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getBlockBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getBlockFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder() { - if (blockBuilder_ != null) { - return blockBuilder_.getMessageOrBuilder(); - } else { - return block_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> - getBlockFieldBuilder() { - if (blockBuilder_ == null) { - blockBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>( - block_, - getParentForChildren(), - isClean()); - block_ = null; - } - return blockBuilder_; - } - - // @@protoc_insertion_point(builder_scope:AddBlockResponseProto) - } - - static { - defaultInstance = new AddBlockResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:AddBlockResponseProto) - } - - public interface GetAdditionalDatanodeRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string src = 1; - boolean hasSrc(); - String getSrc(); - - // required .ExtendedBlockProto blk = 2; - boolean hasBlk(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlk(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlkOrBuilder(); - - // repeated .DatanodeInfoProto existings = 3; - java.util.List - getExistingsList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getExistings(int index); - int getExistingsCount(); - java.util.List - getExistingsOrBuilderList(); - 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getExistingsOrBuilder( - int index); - - // repeated .DatanodeInfoProto excludes = 4; - java.util.List - getExcludesList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getExcludes(int index); - int getExcludesCount(); - java.util.List - getExcludesOrBuilderList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getExcludesOrBuilder( - int index); - - // required uint32 numAdditionalNodes = 5; - boolean hasNumAdditionalNodes(); - int getNumAdditionalNodes(); - - // required string clientName = 6; - boolean hasClientName(); - String getClientName(); - } - public static final class GetAdditionalDatanodeRequestProto extends - com.google.protobuf.GeneratedMessage - implements GetAdditionalDatanodeRequestProtoOrBuilder { - // Use GetAdditionalDatanodeRequestProto.newBuilder() to construct. - private GetAdditionalDatanodeRequestProto(Builder builder) { - super(builder); - } - private GetAdditionalDatanodeRequestProto(boolean noInit) {} - - private static final GetAdditionalDatanodeRequestProto defaultInstance; - public static GetAdditionalDatanodeRequestProto getDefaultInstance() { - return defaultInstance; - } - - public GetAdditionalDatanodeRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetAdditionalDatanodeRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetAdditionalDatanodeRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string src = 1; - public static final int SRC_FIELD_NUMBER = 1; - private java.lang.Object src_; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - src_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getSrcBytes() { - java.lang.Object ref = src_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - src_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required .ExtendedBlockProto blk = 2; - public static final int BLK_FIELD_NUMBER = 2; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto blk_; - public boolean hasBlk() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlk() { - return blk_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlkOrBuilder() { - return blk_; - } - - // repeated .DatanodeInfoProto existings = 3; - public static final int EXISTINGS_FIELD_NUMBER = 3; - private java.util.List existings_; - public java.util.List getExistingsList() { - return existings_; - } - public java.util.List - getExistingsOrBuilderList() { - return existings_; - } - public int getExistingsCount() { - return existings_.size(); - } - 
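// Field layout of GetAdditionalDatanodeRequestProto, as declared by the accessors above and below:
// src (required string, field 1) names the file being written; blk (required ExtendedBlockProto,
// field 2) is the block for which additional datanodes are requested; existings (repeated
// DatanodeInfoProto, field 3) lists the datanodes already holding the block, and excludes
// (repeated DatanodeInfoProto, field 4) lists datanodes to avoid; numAdditionalNodes (required
// uint32, field 5) is the number of replacement nodes wanted; clientName (required string,
// field 6) identifies the requesting client.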
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getExistings(int index) { - return existings_.get(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getExistingsOrBuilder( - int index) { - return existings_.get(index); - } - - // repeated .DatanodeInfoProto excludes = 4; - public static final int EXCLUDES_FIELD_NUMBER = 4; - private java.util.List excludes_; - public java.util.List getExcludesList() { - return excludes_; - } - public java.util.List - getExcludesOrBuilderList() { - return excludes_; - } - public int getExcludesCount() { - return excludes_.size(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getExcludes(int index) { - return excludes_.get(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getExcludesOrBuilder( - int index) { - return excludes_.get(index); - } - - // required uint32 numAdditionalNodes = 5; - public static final int NUMADDITIONALNODES_FIELD_NUMBER = 5; - private int numAdditionalNodes_; - public boolean hasNumAdditionalNodes() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public int getNumAdditionalNodes() { - return numAdditionalNodes_; - } - - // required string clientName = 6; - public static final int CLIENTNAME_FIELD_NUMBER = 6; - private java.lang.Object clientName_; - public boolean hasClientName() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public String getClientName() { - java.lang.Object ref = clientName_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - clientName_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getClientNameBytes() { - java.lang.Object ref = clientName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - clientName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - src_ = ""; - blk_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - existings_ = java.util.Collections.emptyList(); - excludes_ = java.util.Collections.emptyList(); - numAdditionalNodes_ = 0; - clientName_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasSrc()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasBlk()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasNumAdditionalNodes()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasClientName()) { - memoizedIsInitialized = 0; - return false; - } - if (!getBlk().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getExistingsCount(); i++) { - if (!getExistings(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - for (int i = 0; i < getExcludesCount(); i++) { - if (!getExcludes(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, 
getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, blk_); - } - for (int i = 0; i < existings_.size(); i++) { - output.writeMessage(3, existings_.get(i)); - } - for (int i = 0; i < excludes_.size(); i++) { - output.writeMessage(4, excludes_.get(i)); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt32(5, numAdditionalNodes_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeBytes(6, getClientNameBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, blk_); - } - for (int i = 0; i < existings_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, existings_.get(i)); - } - for (int i = 0; i < excludes_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, excludes_.get(i)); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(5, numAdditionalNodes_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(6, getClientNameBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto) obj; - - boolean result = true; - result = result && (hasSrc() == other.hasSrc()); - if (hasSrc()) { - result = result && getSrc() - .equals(other.getSrc()); - } - result = result && (hasBlk() == other.hasBlk()); - if (hasBlk()) { - result = result && getBlk() - .equals(other.getBlk()); - } - result = result && getExistingsList() - .equals(other.getExistingsList()); - result = result && getExcludesList() - .equals(other.getExcludesList()); - result = result && (hasNumAdditionalNodes() == other.hasNumAdditionalNodes()); - if (hasNumAdditionalNodes()) { - result = result && (getNumAdditionalNodes() - == other.getNumAdditionalNodes()); - } - result = result && (hasClientName() == other.hasClientName()); - if (hasClientName()) { - result = result && getClientName() - .equals(other.getClientName()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasSrc()) { - hash = (37 * hash) + SRC_FIELD_NUMBER; - hash = (53 * hash) + getSrc().hashCode(); - } - if (hasBlk()) { - hash = (37 * hash) + BLK_FIELD_NUMBER; 
- hash = (53 * hash) + getBlk().hashCode(); - } - if (getExistingsCount() > 0) { - hash = (37 * hash) + EXISTINGS_FIELD_NUMBER; - hash = (53 * hash) + getExistingsList().hashCode(); - } - if (getExcludesCount() > 0) { - hash = (37 * hash) + EXCLUDES_FIELD_NUMBER; - hash = (53 * hash) + getExcludesList().hashCode(); - } - if (hasNumAdditionalNodes()) { - hash = (37 * hash) + NUMADDITIONALNODES_FIELD_NUMBER; - hash = (53 * hash) + getNumAdditionalNodes(); - } - if (hasClientName()) { - hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER; - hash = (53 * hash) + getClientName().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetAdditionalDatanodeRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetAdditionalDatanodeRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBlkFieldBuilder(); - getExistingsFieldBuilder(); - getExcludesFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - src_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - if (blkBuilder_ == null) { - blk_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - } else { - blkBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - if (existingsBuilder_ == null) { - existings_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); - } else { - existingsBuilder_.clear(); - } - if (excludesBuilder_ == null) { - excludes_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000008); - } else { - excludesBuilder_.clear(); - } - numAdditionalNodes_ = 0; - bitField0_ = (bitField0_ & ~0x00000010); - clientName_ = ""; - bitField0_ = (bitField0_ & ~0x00000020); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto.getDefaultInstance(); - } 
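// A minimal usage sketch of this generated builder/parse API (illustrative only;
// 'blk', 'liveNodes', and 'deadNode' are hypothetical pre-built HdfsProtos messages,
// not identifiers from this file):
//
//   GetAdditionalDatanodeRequestProto req =
//       GetAdditionalDatanodeRequestProto.newBuilder()
//           .setSrc("/user/foo/data.txt")
//           .setBlk(blk)                        // required ExtendedBlockProto
//           .addAllExistings(liveNodes)         // Iterable of DatanodeInfoProto
//           .addExcludes(deadNode)
//           .setNumAdditionalNodes(1)
//           .setClientName("DFSClient_example")
//           .build();                           // throws if a required field is unset
//   byte[] wire = req.toByteArray();
//   GetAdditionalDatanodeRequestProto roundTripped =
//       GetAdditionalDatanodeRequestProto.parseFrom(wire);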
- - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.src_ = src_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - if (blkBuilder_ == null) { - result.blk_ = blk_; - } else { - result.blk_ = blkBuilder_.build(); - } - if (existingsBuilder_ == null) { - if (((bitField0_ & 0x00000004) == 0x00000004)) { - existings_ = java.util.Collections.unmodifiableList(existings_); - bitField0_ = (bitField0_ & ~0x00000004); - } - result.existings_ = existings_; - } else { - result.existings_ = existingsBuilder_.build(); - } - if (excludesBuilder_ == null) { - if (((bitField0_ & 0x00000008) == 0x00000008)) { - excludes_ = java.util.Collections.unmodifiableList(excludes_); - bitField0_ = (bitField0_ & ~0x00000008); - } - result.excludes_ = excludes_; - } else { - result.excludes_ = excludesBuilder_.build(); - } - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000004; - } - result.numAdditionalNodes_ = numAdditionalNodes_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000008; - } - result.clientName_ = clientName_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto.getDefaultInstance()) return this; - if (other.hasSrc()) { - setSrc(other.getSrc()); - } - if (other.hasBlk()) { - mergeBlk(other.getBlk()); - } - if (existingsBuilder_ == null) { - if (!other.existings_.isEmpty()) { - if (existings_.isEmpty()) { - existings_ = other.existings_; - bitField0_ = (bitField0_ & ~0x00000004); - } else { - ensureExistingsIsMutable(); - existings_.addAll(other.existings_); - } - onChanged(); - } - } else { - if 
(!other.existings_.isEmpty()) { - if (existingsBuilder_.isEmpty()) { - existingsBuilder_.dispose(); - existingsBuilder_ = null; - existings_ = other.existings_; - bitField0_ = (bitField0_ & ~0x00000004); - existingsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getExistingsFieldBuilder() : null; - } else { - existingsBuilder_.addAllMessages(other.existings_); - } - } - } - if (excludesBuilder_ == null) { - if (!other.excludes_.isEmpty()) { - if (excludes_.isEmpty()) { - excludes_ = other.excludes_; - bitField0_ = (bitField0_ & ~0x00000008); - } else { - ensureExcludesIsMutable(); - excludes_.addAll(other.excludes_); - } - onChanged(); - } - } else { - if (!other.excludes_.isEmpty()) { - if (excludesBuilder_.isEmpty()) { - excludesBuilder_.dispose(); - excludesBuilder_ = null; - excludes_ = other.excludes_; - bitField0_ = (bitField0_ & ~0x00000008); - excludesBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getExcludesFieldBuilder() : null; - } else { - excludesBuilder_.addAllMessages(other.excludes_); - } - } - } - if (other.hasNumAdditionalNodes()) { - setNumAdditionalNodes(other.getNumAdditionalNodes()); - } - if (other.hasClientName()) { - setClientName(other.getClientName()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasSrc()) { - - return false; - } - if (!hasBlk()) { - - return false; - } - if (!hasNumAdditionalNodes()) { - - return false; - } - if (!hasClientName()) { - - return false; - } - if (!getBlk().isInitialized()) { - - return false; - } - for (int i = 0; i < getExistingsCount(); i++) { - if (!getExistings(i).isInitialized()) { - - return false; - } - } - for (int i = 0; i < getExcludesCount(); i++) { - if (!getExcludes(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - src_ = input.readBytes(); - break; - } - case 18: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(); - if (hasBlk()) { - subBuilder.mergeFrom(getBlk()); - } - input.readMessage(subBuilder, extensionRegistry); - setBlk(subBuilder.buildPartial()); - break; - } - case 26: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addExistings(subBuilder.buildPartial()); - break; - } - case 34: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addExcludes(subBuilder.buildPartial()); - break; - } - case 40: { - bitField0_ |= 
0x00000010; - numAdditionalNodes_ = input.readUInt32(); - break; - } - case 50: { - bitField0_ |= 0x00000020; - clientName_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required string src = 1; - private java.lang.Object src_ = ""; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - src_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setSrc(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - return this; - } - public Builder clearSrc() { - bitField0_ = (bitField0_ & ~0x00000001); - src_ = getDefaultInstance().getSrc(); - onChanged(); - return this; - } - void setSrc(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - } - - // required .ExtendedBlockProto blk = 2; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto blk_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blkBuilder_; - public boolean hasBlk() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlk() { - if (blkBuilder_ == null) { - return blk_; - } else { - return blkBuilder_.getMessage(); - } - } - public Builder setBlk(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { - if (blkBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - blk_ = value; - onChanged(); - } else { - blkBuilder_.setMessage(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder setBlk( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) { - if (blkBuilder_ == null) { - blk_ = builderForValue.build(); - onChanged(); - } else { - blkBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder mergeBlk(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { - if (blkBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && - blk_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) { - blk_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(blk_).mergeFrom(value).buildPartial(); - } else { - blk_ = value; - } - onChanged(); - } else { - blkBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder clearBlk() { - if (blkBuilder_ == null) { - blk_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - onChanged(); - } else { - blkBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlkBuilder() { - bitField0_ |= 0x00000002; - onChanged(); - return getBlkFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder 
getBlkOrBuilder() { - if (blkBuilder_ != null) { - return blkBuilder_.getMessageOrBuilder(); - } else { - return blk_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> - getBlkFieldBuilder() { - if (blkBuilder_ == null) { - blkBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>( - blk_, - getParentForChildren(), - isClean()); - blk_ = null; - } - return blkBuilder_; - } - - // repeated .DatanodeInfoProto existings = 3; - private java.util.List existings_ = - java.util.Collections.emptyList(); - private void ensureExistingsIsMutable() { - if (!((bitField0_ & 0x00000004) == 0x00000004)) { - existings_ = new java.util.ArrayList(existings_); - bitField0_ |= 0x00000004; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> existingsBuilder_; - - public java.util.List getExistingsList() { - if (existingsBuilder_ == null) { - return java.util.Collections.unmodifiableList(existings_); - } else { - return existingsBuilder_.getMessageList(); - } - } - public int getExistingsCount() { - if (existingsBuilder_ == null) { - return existings_.size(); - } else { - return existingsBuilder_.getCount(); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getExistings(int index) { - if (existingsBuilder_ == null) { - return existings_.get(index); - } else { - return existingsBuilder_.getMessage(index); - } - } - public Builder setExistings( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (existingsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureExistingsIsMutable(); - existings_.set(index, value); - onChanged(); - } else { - existingsBuilder_.setMessage(index, value); - } - return this; - } - public Builder setExistings( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { - if (existingsBuilder_ == null) { - ensureExistingsIsMutable(); - existings_.set(index, builderForValue.build()); - onChanged(); - } else { - existingsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - public Builder addExistings(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (existingsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureExistingsIsMutable(); - existings_.add(value); - onChanged(); - } else { - existingsBuilder_.addMessage(value); - } - return this; - } - public Builder addExistings( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (existingsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureExistingsIsMutable(); - existings_.add(index, value); - onChanged(); - } else { - existingsBuilder_.addMessage(index, value); - } - return this; - } - public Builder addExistings( - 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { - if (existingsBuilder_ == null) { - ensureExistingsIsMutable(); - existings_.add(builderForValue.build()); - onChanged(); - } else { - existingsBuilder_.addMessage(builderForValue.build()); - } - return this; - } - public Builder addExistings( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { - if (existingsBuilder_ == null) { - ensureExistingsIsMutable(); - existings_.add(index, builderForValue.build()); - onChanged(); - } else { - existingsBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - public Builder addAllExistings( - java.lang.Iterable values) { - if (existingsBuilder_ == null) { - ensureExistingsIsMutable(); - super.addAll(values, existings_); - onChanged(); - } else { - existingsBuilder_.addAllMessages(values); - } - return this; - } - public Builder clearExistings() { - if (existingsBuilder_ == null) { - existings_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); - onChanged(); - } else { - existingsBuilder_.clear(); - } - return this; - } - public Builder removeExistings(int index) { - if (existingsBuilder_ == null) { - ensureExistingsIsMutable(); - existings_.remove(index); - onChanged(); - } else { - existingsBuilder_.remove(index); - } - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getExistingsBuilder( - int index) { - return getExistingsFieldBuilder().getBuilder(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getExistingsOrBuilder( - int index) { - if (existingsBuilder_ == null) { - return existings_.get(index); } else { - return existingsBuilder_.getMessageOrBuilder(index); - } - } - public java.util.List - getExistingsOrBuilderList() { - if (existingsBuilder_ != null) { - return existingsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(existings_); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addExistingsBuilder() { - return getExistingsFieldBuilder().addBuilder( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addExistingsBuilder( - int index) { - return getExistingsFieldBuilder().addBuilder( - index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); - } - public java.util.List - getExistingsBuilderList() { - return getExistingsFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> - getExistingsFieldBuilder() { - if (existingsBuilder_ == null) { - existingsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>( - existings_, - ((bitField0_ & 0x00000004) == 0x00000004), - getParentForChildren(), - isClean()); - existings_ = null; - } - return existingsBuilder_; - } - - // repeated .DatanodeInfoProto excludes = 4; - private 
java.util.List excludes_ = - java.util.Collections.emptyList(); - private void ensureExcludesIsMutable() { - if (!((bitField0_ & 0x00000008) == 0x00000008)) { - excludes_ = new java.util.ArrayList(excludes_); - bitField0_ |= 0x00000008; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> excludesBuilder_; - - public java.util.List getExcludesList() { - if (excludesBuilder_ == null) { - return java.util.Collections.unmodifiableList(excludes_); - } else { - return excludesBuilder_.getMessageList(); - } - } - public int getExcludesCount() { - if (excludesBuilder_ == null) { - return excludes_.size(); - } else { - return excludesBuilder_.getCount(); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getExcludes(int index) { - if (excludesBuilder_ == null) { - return excludes_.get(index); - } else { - return excludesBuilder_.getMessage(index); - } - } - public Builder setExcludes( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (excludesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureExcludesIsMutable(); - excludes_.set(index, value); - onChanged(); - } else { - excludesBuilder_.setMessage(index, value); - } - return this; - } - public Builder setExcludes( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { - if (excludesBuilder_ == null) { - ensureExcludesIsMutable(); - excludes_.set(index, builderForValue.build()); - onChanged(); - } else { - excludesBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - public Builder addExcludes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (excludesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureExcludesIsMutable(); - excludes_.add(value); - onChanged(); - } else { - excludesBuilder_.addMessage(value); - } - return this; - } - public Builder addExcludes( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (excludesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureExcludesIsMutable(); - excludes_.add(index, value); - onChanged(); - } else { - excludesBuilder_.addMessage(index, value); - } - return this; - } - public Builder addExcludes( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { - if (excludesBuilder_ == null) { - ensureExcludesIsMutable(); - excludes_.add(builderForValue.build()); - onChanged(); - } else { - excludesBuilder_.addMessage(builderForValue.build()); - } - return this; - } - public Builder addExcludes( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { - if (excludesBuilder_ == null) { - ensureExcludesIsMutable(); - excludes_.add(index, builderForValue.build()); - onChanged(); - } else { - excludesBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - public Builder addAllExcludes( - java.lang.Iterable values) { - if (excludesBuilder_ == null) { - ensureExcludesIsMutable(); - super.addAll(values, excludes_); - onChanged(); - } else { - excludesBuilder_.addAllMessages(values); - } - return this; - } - public Builder clearExcludes() { 
[... generated code elided: the remainder of GetAdditionalDatanodeRequestProto.Builder (RepeatedFieldBuilder plumbing for the repeated DatanodeInfoProto excludes field, plus accessors for required uint32 numAdditionalNodes = 5 and required string clientName = 6), followed by the generated GetAdditionalDatanodeResponseProto class (required .LocatedBlockProto block = 1): field accessors, isInitialized/writeTo/getSerializedSize, equals/hashCode, the usual family of parseFrom and parseDelimitedFrom overloads, and its Builder ...]
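The .proto message behind the response class elided above is recoverable from the field comments the protocol buffer compiler leaves in the generated Java. This is a sketch reconstructed from those comments, not the checked-in file verbatim; the authoritative definition lives in the renamed src/main/proto/ClientNamenodeProtocol.proto, and LocatedBlockProto is declared in hdfs.proto:

  message GetAdditionalDatanodeResponseProto {
    required LocatedBlockProto block = 1;  // block with its updated set of locations
  }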
[... generated code elided: the SingleFieldBuilder wiring that closes out GetAdditionalDatanodeResponseProto, then the generated CompleteRequestProto class (required string src = 1; required string clientName = 2; required .ExtendedBlockProto last = 3): lazy ByteString-to-String accessors, isInitialized checks for the three required fields, serialization, equals/hashCode, parseFrom overloads, and most of its Builder ...]
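CompleteRequestProto's three required fields are spelled out verbatim in the generated comments (required string src = 1; and so on). Reconstructed as a sketch under the same caveat as above:

  message CompleteRequestProto {
    required string src = 1;               // path of the file being completed
    required string clientName = 2;        // client holding the lease
    required ExtendedBlockProto last = 3;  // the file's last block (type from hdfs.proto)
  }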
[... generated code elided: the tail of CompleteRequestProto.Builder (SingleFieldBuilder plumbing for last), the complete generated CompleteResponseProto class (required bool result = 1) following the same pattern, and the opening of ReportBadBlocksRequestProto (repeated .LocatedBlockProto blocks = 1) ...]
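The response side of complete() is a single flag; sketched from the generated comment (required bool result = 1;):

  message CompleteResponseProto {
    required bool result = 1;  // whether the complete call succeeded
  }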
- private ReportBadBlocksRequestProto(Builder builder) { - super(builder); - } - private ReportBadBlocksRequestProto(boolean noInit) {} - - private static final ReportBadBlocksRequestProto defaultInstance; - public static ReportBadBlocksRequestProto getDefaultInstance() { - return defaultInstance; - } - - public ReportBadBlocksRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_ReportBadBlocksRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_ReportBadBlocksRequestProto_fieldAccessorTable; - } - - // repeated .LocatedBlockProto blocks = 1; - public static final int BLOCKS_FIELD_NUMBER = 1; - private java.util.List blocks_; - public java.util.List getBlocksList() { - return blocks_; - } - public java.util.List - getBlocksOrBuilderList() { - return blocks_; - } - public int getBlocksCount() { - return blocks_.size(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) { - return blocks_.get(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder( - int index) { - return blocks_.get(index); - } - - private void initFields() { - blocks_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - for (int i = 0; i < getBlocksCount(); i++) { - if (!getBlocks(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < blocks_.size(); i++) { - output.writeMessage(1, blocks_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - for (int i = 0; i < blocks_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, blocks_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto) obj; - - boolean result = true; - result = result && getBlocksList() - .equals(other.getBlocksList()); - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = 
(19 * hash) + getDescriptorForType().hashCode(); - if (getBlocksCount() > 0) { - hash = (37 * hash) + BLOCKS_FIELD_NUMBER; - hash = (53 * hash) + getBlocksList().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder 
newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_ReportBadBlocksRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_ReportBadBlocksRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBlocksFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (blocksBuilder_ == null) { - blocks_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - } else { - blocksBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto(this); - int from_bitField0_ = 
bitField0_; - if (blocksBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - blocks_ = java.util.Collections.unmodifiableList(blocks_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.blocks_ = blocks_; - } else { - result.blocks_ = blocksBuilder_.build(); - } - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto.getDefaultInstance()) return this; - if (blocksBuilder_ == null) { - if (!other.blocks_.isEmpty()) { - if (blocks_.isEmpty()) { - blocks_ = other.blocks_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureBlocksIsMutable(); - blocks_.addAll(other.blocks_); - } - onChanged(); - } - } else { - if (!other.blocks_.isEmpty()) { - if (blocksBuilder_.isEmpty()) { - blocksBuilder_.dispose(); - blocksBuilder_ = null; - blocks_ = other.blocks_; - bitField0_ = (bitField0_ & ~0x00000001); - blocksBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getBlocksFieldBuilder() : null; - } else { - blocksBuilder_.addAllMessages(other.blocks_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - for (int i = 0; i < getBlocksCount(); i++) { - if (!getBlocks(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addBlocks(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // repeated .LocatedBlockProto blocks = 1; - private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> blocks_ = - java.util.Collections.emptyList(); - private void ensureBlocksIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto>(blocks_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> blocksBuilder_; - - public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> getBlocksList() { - if (blocksBuilder_ == null) { - return 
java.util.Collections.unmodifiableList(blocks_); - } else { - return blocksBuilder_.getMessageList(); - } - } - public int getBlocksCount() { - if (blocksBuilder_ == null) { - return blocks_.size(); - } else { - return blocksBuilder_.getCount(); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) { - if (blocksBuilder_ == null) { - return blocks_.get(index); - } else { - return blocksBuilder_.getMessage(index); - } - } - public Builder setBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { - if (blocksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureBlocksIsMutable(); - blocks_.set(index, value); - onChanged(); - } else { - blocksBuilder_.setMessage(index, value); - } - return this; - } - public Builder setBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.set(index, builderForValue.build()); - onChanged(); - } else { - blocksBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { - if (blocksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureBlocksIsMutable(); - blocks_.add(value); - onChanged(); - } else { - blocksBuilder_.addMessage(value); - } - return this; - } - public Builder addBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { - if (blocksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureBlocksIsMutable(); - blocks_.add(index, value); - onChanged(); - } else { - blocksBuilder_.addMessage(index, value); - } - return this; - } - public Builder addBlocks( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.add(builderForValue.build()); - onChanged(); - } else { - blocksBuilder_.addMessage(builderForValue.build()); - } - return this; - } - public Builder addBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.add(index, builderForValue.build()); - onChanged(); - } else { - blocksBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - public Builder addAllBlocks( - java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> values) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - super.addAll(values, blocks_); - onChanged(); - } else { - blocksBuilder_.addAllMessages(values); - } - return this; - } - public Builder clearBlocks() { - if (blocksBuilder_ == null) { - blocks_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - blocksBuilder_.clear(); - } - return this; - } - public Builder removeBlocks(int index) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.remove(index); - onChanged(); - } else { - blocksBuilder_.remove(index); - } - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getBlocksBuilder( - int index) { - return getBlocksFieldBuilder().getBuilder(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder( - int index) { - if (blocksBuilder_ == 
null) { - return blocks_.get(index); } else { - return blocksBuilder_.getMessageOrBuilder(index); - } - } - public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> - getBlocksOrBuilderList() { - if (blocksBuilder_ != null) { - return blocksBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(blocks_); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder() { - return getBlocksFieldBuilder().addBuilder( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder( - int index) { - return getBlocksFieldBuilder().addBuilder( - index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()); - } - public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder> - getBlocksBuilderList() { - return getBlocksFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> - getBlocksFieldBuilder() { - if (blocksBuilder_ == null) { - blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>( - blocks_, - ((bitField0_ & 0x00000001) == 0x00000001), - getParentForChildren(), - isClean()); - blocks_ = null; - } - return blocksBuilder_; - } - - // @@protoc_insertion_point(builder_scope:ReportBadBlocksRequestProto) - } - - static { - defaultInstance = new ReportBadBlocksRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ReportBadBlocksRequestProto) - } - - public interface ReportBadBlocksResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class ReportBadBlocksResponseProto extends - com.google.protobuf.GeneratedMessage - implements ReportBadBlocksResponseProtoOrBuilder { - // Use ReportBadBlocksResponseProto.newBuilder() to construct. 
- private ReportBadBlocksResponseProto(Builder builder) { - super(builder); - } - private ReportBadBlocksResponseProto(boolean noInit) {} - - private static final ReportBadBlocksResponseProto defaultInstance; - public static ReportBadBlocksResponseProto getDefaultInstance() { - return defaultInstance; - } - - public ReportBadBlocksResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_ReportBadBlocksResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_ReportBadBlocksResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_ReportBadBlocksResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_ReportBadBlocksResponseProto_fieldAccessorTable; - } - - // Construct using 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, 
unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:ReportBadBlocksResponseProto) - } - - static { - defaultInstance = new ReportBadBlocksResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ReportBadBlocksResponseProto) - } - - public interface ConcatRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string trg = 1; - boolean hasTrg(); - String getTrg(); - - // repeated string srcs = 2; - java.util.List<String> getSrcsList(); - int getSrcsCount(); - String getSrcs(int index); - } - public static final class ConcatRequestProto extends - com.google.protobuf.GeneratedMessage - implements ConcatRequestProtoOrBuilder { - // Use ConcatRequestProto.newBuilder() to construct. - private ConcatRequestProto(Builder builder) { - super(builder); - } - private ConcatRequestProto(boolean noInit) {} - - private static final ConcatRequestProto defaultInstance; - public static ConcatRequestProto getDefaultInstance() { - return defaultInstance; - } - - public ConcatRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_ConcatRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_ConcatRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string trg = 1; - public static final int TRG_FIELD_NUMBER = 1; - private java.lang.Object trg_; - public boolean hasTrg() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getTrg() { - java.lang.Object ref = trg_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - trg_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getTrgBytes() { - java.lang.Object ref = trg_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - trg_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // repeated string srcs = 2; - public static final int SRCS_FIELD_NUMBER = 2; - private com.google.protobuf.LazyStringList srcs_; - public java.util.List<String> - getSrcsList() { - return srcs_; - } - public int getSrcsCount() { - return srcs_.size(); - } - public String getSrcs(int index) { - return srcs_.get(index); - } - - private void initFields() { - trg_ = ""; - srcs_ = com.google.protobuf.LazyStringArrayList.EMPTY; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasTrg()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getTrgBytes()); - } - for (int i = 0; i < srcs_.size(); i++) 
{ - output.writeBytes(2, srcs_.getByteString(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getTrgBytes()); - } - { - int dataSize = 0; - for (int i = 0; i < srcs_.size(); i++) { - dataSize += com.google.protobuf.CodedOutputStream - .computeBytesSizeNoTag(srcs_.getByteString(i)); - } - size += dataSize; - size += 1 * getSrcsList().size(); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto) obj; - - boolean result = true; - result = result && (hasTrg() == other.hasTrg()); - if (hasTrg()) { - result = result && getTrg() - .equals(other.getTrg()); - } - result = result && getSrcsList() - .equals(other.getSrcsList()); - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasTrg()) { - hash = (37 * hash) + TRG_FIELD_NUMBER; - hash = (53 * hash) + getTrg().hashCode(); - } - if (getSrcsCount() > 0) { - hash = (37 * hash) + SRCS_FIELD_NUMBER; - hash = (53 * hash) + getSrcsList().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return 
newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_ConcatRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_ConcatRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - trg_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - srcs_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & 
~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.trg_ = trg_; - if (((bitField0_ & 0x00000002) == 0x00000002)) { - srcs_ = new com.google.protobuf.UnmodifiableLazyStringList( - srcs_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.srcs_ = srcs_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto.getDefaultInstance()) return this; - if (other.hasTrg()) { - setTrg(other.getTrg()); - } - if (!other.srcs_.isEmpty()) { - if (srcs_.isEmpty()) { - srcs_ = other.srcs_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureSrcsIsMutable(); - srcs_.addAll(other.srcs_); - } - onChanged(); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasTrg()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - 
onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - trg_ = input.readBytes(); - break; - } - case 18: { - ensureSrcsIsMutable(); - srcs_.add(input.readBytes()); - break; - } - } - } - } - - private int bitField0_; - - // required string trg = 1; - private java.lang.Object trg_ = ""; - public boolean hasTrg() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getTrg() { - java.lang.Object ref = trg_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - trg_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setTrg(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - trg_ = value; - onChanged(); - return this; - } - public Builder clearTrg() { - bitField0_ = (bitField0_ & ~0x00000001); - trg_ = getDefaultInstance().getTrg(); - onChanged(); - return this; - } - void setTrg(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - trg_ = value; - onChanged(); - } - - // repeated string srcs = 2; - private com.google.protobuf.LazyStringList srcs_ = com.google.protobuf.LazyStringArrayList.EMPTY; - private void ensureSrcsIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { - srcs_ = new com.google.protobuf.LazyStringArrayList(srcs_); - bitField0_ |= 0x00000002; - } - } - public java.util.List<String> - getSrcsList() { - return java.util.Collections.unmodifiableList(srcs_); - } - public int getSrcsCount() { - return srcs_.size(); - } - public String getSrcs(int index) { - return srcs_.get(index); - } - public Builder setSrcs( - int index, String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureSrcsIsMutable(); - srcs_.set(index, value); - onChanged(); - return this; - } - public Builder addSrcs(String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureSrcsIsMutable(); - srcs_.add(value); - onChanged(); - return this; - } - public Builder addAllSrcs( - java.lang.Iterable<String> values) { - ensureSrcsIsMutable(); - super.addAll(values, srcs_); - onChanged(); - return this; - } - public Builder clearSrcs() { - srcs_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - return this; - } - void addSrcs(com.google.protobuf.ByteString value) { - ensureSrcsIsMutable(); - srcs_.add(value); - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:ConcatRequestProto) - } - - static { - defaultInstance = new ConcatRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ConcatRequestProto) - } - - public interface ConcatResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class ConcatResponseProto extends - com.google.protobuf.GeneratedMessage - implements ConcatResponseProtoOrBuilder { - // Use ConcatResponseProto.newBuilder() to construct. 
- private ConcatResponseProto(Builder builder) { - super(builder); - } - private ConcatResponseProto(boolean noInit) {} - - private static final ConcatResponseProto defaultInstance; - public static ConcatResponseProto getDefaultInstance() { - return defaultInstance; - } - - public ConcatResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_ConcatResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_ConcatResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_ConcatResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_ConcatResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void 
maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:ConcatResponseProto) - } - - static { - defaultInstance = new ConcatResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ConcatResponseProto) - } - - public interface 
RenameRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string src = 1; - boolean hasSrc(); - String getSrc(); - - // required string dst = 2; - boolean hasDst(); - String getDst(); - } - public static final class RenameRequestProto extends - com.google.protobuf.GeneratedMessage - implements RenameRequestProtoOrBuilder { - // Use RenameRequestProto.newBuilder() to construct. - private RenameRequestProto(Builder builder) { - super(builder); - } - private RenameRequestProto(boolean noInit) {} - - private static final RenameRequestProto defaultInstance; - public static RenameRequestProto getDefaultInstance() { - return defaultInstance; - } - - public RenameRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RenameRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RenameRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string src = 1; - public static final int SRC_FIELD_NUMBER = 1; - private java.lang.Object src_; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - src_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getSrcBytes() { - java.lang.Object ref = src_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - src_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string dst = 2; - public static final int DST_FIELD_NUMBER = 2; - private java.lang.Object dst_; - public boolean hasDst() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getDst() { - java.lang.Object ref = dst_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - dst_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getDstBytes() { - java.lang.Object ref = dst_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - dst_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - src_ = ""; - dst_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasSrc()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasDst()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getSrcBytes()); - 
} - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getDstBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getDstBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto) obj; - - boolean result = true; - result = result && (hasSrc() == other.hasSrc()); - if (hasSrc()) { - result = result && getSrc() - .equals(other.getSrc()); - } - result = result && (hasDst() == other.hasDst()); - if (hasDst()) { - result = result && getDst() - .equals(other.getDst()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasSrc()) { - hash = (37 * hash) + SRC_FIELD_NUMBER; - hash = (53 * hash) + getSrc().hashCode(); - } - if (hasDst()) { - hash = (37 * hash) + DST_FIELD_NUMBER; - hash = (53 * hash) + getDst().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return 
newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RenameRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RenameRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - src_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - dst_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public 
Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.src_ = src_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.dst_ = dst_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto.getDefaultInstance()) return this; - if (other.hasSrc()) { - setSrc(other.getSrc()); - } - if (other.hasDst()) { - setDst(other.getDst()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasSrc()) { - - return false; - } - if (!hasDst()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 
0x00000001; - src_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - dst_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required string src = 1; - private java.lang.Object src_ = ""; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - src_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setSrc(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - return this; - } - public Builder clearSrc() { - bitField0_ = (bitField0_ & ~0x00000001); - src_ = getDefaultInstance().getSrc(); - onChanged(); - return this; - } - void setSrc(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - } - - // required string dst = 2; - private java.lang.Object dst_ = ""; - public boolean hasDst() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getDst() { - java.lang.Object ref = dst_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - dst_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setDst(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - dst_ = value; - onChanged(); - return this; - } - public Builder clearDst() { - bitField0_ = (bitField0_ & ~0x00000002); - dst_ = getDefaultInstance().getDst(); - onChanged(); - return this; - } - void setDst(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000002; - dst_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:RenameRequestProto) - } - - static { - defaultInstance = new RenameRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:RenameRequestProto) - } - - public interface RenameResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required bool result = 1; - boolean hasResult(); - boolean getResult(); - } - public static final class RenameResponseProto extends - com.google.protobuf.GeneratedMessage - implements RenameResponseProtoOrBuilder { - // Use RenameResponseProto.newBuilder() to construct. 
- private RenameResponseProto(Builder builder) { - super(builder); - } - private RenameResponseProto(boolean noInit) {} - - private static final RenameResponseProto defaultInstance; - public static RenameResponseProto getDefaultInstance() { - return defaultInstance; - } - - public RenameResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RenameResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RenameResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required bool result = 1; - public static final int RESULT_FIELD_NUMBER = 1; - private boolean result_; - public boolean hasResult() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public boolean getResult() { - return result_; - } - - private void initFields() { - result_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasResult()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(1, result_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(1, result_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto) obj; - - boolean result = true; - result = result && (hasResult() == other.hasResult()); - if (hasResult()) { - result = result && (getResult() - == other.getResult()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasResult()) { - hash = (37 * hash) + RESULT_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getResult()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException 
{ - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RenameResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RenameResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - result_ = false; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.result_ = result_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto other) { - if (other == 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.getDefaultInstance()) return this; - if (other.hasResult()) { - setResult(other.getResult()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasResult()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - result_ = input.readBool(); - break; - } - } - } - } - - private int bitField0_; - - // required bool result = 1; - private boolean result_ ; - public boolean hasResult() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public boolean getResult() { - return result_; - } - public Builder setResult(boolean value) { - bitField0_ |= 0x00000001; - result_ = value; - onChanged(); - return this; - } - public Builder clearResult() { - bitField0_ = (bitField0_ & ~0x00000001); - result_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:RenameResponseProto) - } - - static { - defaultInstance = new RenameResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:RenameResponseProto) - } - - public interface Rename2RequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string src = 1; - boolean hasSrc(); - String getSrc(); - - // required string dst = 2; - boolean hasDst(); - String getDst(); - - // required bool overwriteDest = 3; - boolean hasOverwriteDest(); - boolean getOverwriteDest(); - } - public static final class Rename2RequestProto extends - com.google.protobuf.GeneratedMessage - implements Rename2RequestProtoOrBuilder { - // Use Rename2RequestProto.newBuilder() to construct. 
- private Rename2RequestProto(Builder builder) { - super(builder); - } - private Rename2RequestProto(boolean noInit) {} - - private static final Rename2RequestProto defaultInstance; - public static Rename2RequestProto getDefaultInstance() { - return defaultInstance; - } - - public Rename2RequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_Rename2RequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_Rename2RequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string src = 1; - public static final int SRC_FIELD_NUMBER = 1; - private java.lang.Object src_; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - src_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getSrcBytes() { - java.lang.Object ref = src_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - src_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string dst = 2; - public static final int DST_FIELD_NUMBER = 2; - private java.lang.Object dst_; - public boolean hasDst() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getDst() { - java.lang.Object ref = dst_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - dst_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getDstBytes() { - java.lang.Object ref = dst_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - dst_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required bool overwriteDest = 3; - public static final int OVERWRITEDEST_FIELD_NUMBER = 3; - private boolean overwriteDest_; - public boolean hasOverwriteDest() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public boolean getOverwriteDest() { - return overwriteDest_; - } - - private void initFields() { - src_ = ""; - dst_ = ""; - overwriteDest_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasSrc()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasDst()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasOverwriteDest()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getSrcBytes()); - } - if 
(((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getDstBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBool(3, overwriteDest_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getDstBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(3, overwriteDest_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto) obj; - - boolean result = true; - result = result && (hasSrc() == other.hasSrc()); - if (hasSrc()) { - result = result && getSrc() - .equals(other.getSrc()); - } - result = result && (hasDst() == other.hasDst()); - if (hasDst()) { - result = result && getDst() - .equals(other.getDst()); - } - result = result && (hasOverwriteDest() == other.hasOverwriteDest()); - if (hasOverwriteDest()) { - result = result && (getOverwriteDest() - == other.getOverwriteDest()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasSrc()) { - hash = (37 * hash) + SRC_FIELD_NUMBER; - hash = (53 * hash) + getSrc().hashCode(); - } - if (hasDst()) { - hash = (37 * hash) + DST_FIELD_NUMBER; - hash = (53 * hash) + getDst().hashCode(); - } - if (hasOverwriteDest()) { - hash = (37 * hash) + OVERWRITEDEST_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getOverwriteDest()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - 
public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_Rename2RequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_Rename2RequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto.newBuilder() - private Builder() { - 
maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - src_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - dst_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - overwriteDest_ = false; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.src_ = src_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.dst_ = dst_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.overwriteDest_ = overwriteDest_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto.getDefaultInstance()) return this; - if (other.hasSrc()) { - setSrc(other.getSrc()); - } - if (other.hasDst()) { - setDst(other.getDst()); - } - if (other.hasOverwriteDest()) { - setOverwriteDest(other.getOverwriteDest()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - 
- public final boolean isInitialized() { - if (!hasSrc()) { - - return false; - } - if (!hasDst()) { - - return false; - } - if (!hasOverwriteDest()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - src_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - dst_ = input.readBytes(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - overwriteDest_ = input.readBool(); - break; - } - } - } - } - - private int bitField0_; - - // required string src = 1; - private java.lang.Object src_ = ""; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - src_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setSrc(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - return this; - } - public Builder clearSrc() { - bitField0_ = (bitField0_ & ~0x00000001); - src_ = getDefaultInstance().getSrc(); - onChanged(); - return this; - } - void setSrc(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - } - - // required string dst = 2; - private java.lang.Object dst_ = ""; - public boolean hasDst() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getDst() { - java.lang.Object ref = dst_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - dst_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setDst(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - dst_ = value; - onChanged(); - return this; - } - public Builder clearDst() { - bitField0_ = (bitField0_ & ~0x00000002); - dst_ = getDefaultInstance().getDst(); - onChanged(); - return this; - } - void setDst(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000002; - dst_ = value; - onChanged(); - } - - // required bool overwriteDest = 3; - private boolean overwriteDest_ ; - public boolean hasOverwriteDest() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public boolean getOverwriteDest() { - return overwriteDest_; - } - public Builder setOverwriteDest(boolean value) { - bitField0_ |= 0x00000004; - overwriteDest_ = value; - onChanged(); - return this; - } - public Builder clearOverwriteDest() { - bitField0_ = (bitField0_ & ~0x00000004); - overwriteDest_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:Rename2RequestProto) - } - - static { - defaultInstance = new Rename2RequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:Rename2RequestProto) - } - - public 
interface Rename2ResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class Rename2ResponseProto extends - com.google.protobuf.GeneratedMessage - implements Rename2ResponseProtoOrBuilder { - // Use Rename2ResponseProto.newBuilder() to construct. - private Rename2ResponseProto(Builder builder) { - super(builder); - } - private Rename2ResponseProto(boolean noInit) {} - - private static final Rename2ResponseProto defaultInstance; - public static Rename2ResponseProto getDefaultInstance() { - return defaultInstance; - } - - public Rename2ResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_Rename2ResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_Rename2ResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto parseFrom(byte[] data) - 
throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_Rename2ResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_Rename2ResponseProto_fieldAccessorTable; - } - - // Construct 
using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return 
this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:Rename2ResponseProto) - } - - static { - defaultInstance = new Rename2ResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:Rename2ResponseProto) - } - - public interface DeleteRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string src = 1; - boolean hasSrc(); - String getSrc(); - - // required bool recursive = 2; - boolean hasRecursive(); - boolean getRecursive(); - } - public static final class DeleteRequestProto extends - com.google.protobuf.GeneratedMessage - implements DeleteRequestProtoOrBuilder { - // Use DeleteRequestProto.newBuilder() to construct. - private DeleteRequestProto(Builder builder) { - super(builder); - } - private DeleteRequestProto(boolean noInit) {} - - private static final DeleteRequestProto defaultInstance; - public static DeleteRequestProto getDefaultInstance() { - return defaultInstance; - } - - public DeleteRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_DeleteRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_DeleteRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string src = 1; - public static final int SRC_FIELD_NUMBER = 1; - private java.lang.Object src_; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - src_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getSrcBytes() { - java.lang.Object ref = src_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - src_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required bool recursive = 2; - public static final int RECURSIVE_FIELD_NUMBER = 2; - private boolean recursive_; - public boolean hasRecursive() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public boolean getRecursive() { - return recursive_; - } - - private void initFields() { - src_ = ""; - recursive_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasSrc()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasRecursive()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBool(2, recursive_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size 
= memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(2, recursive_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto) obj; - - boolean result = true; - result = result && (hasSrc() == other.hasSrc()); - if (hasSrc()) { - result = result && getSrc() - .equals(other.getSrc()); - } - result = result && (hasRecursive() == other.hasRecursive()); - if (hasRecursive()) { - result = result && (getRecursive() - == other.getRecursive()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasSrc()) { - hash = (37 * hash) + SRC_FIELD_NUMBER; - hash = (53 * hash) + getSrc().hashCode(); - } - if (hasRecursive()) { - hash = (37 * hash) + RECURSIVE_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getRecursive()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto parseFrom( - java.io.InputStream input, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_DeleteRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_DeleteRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - src_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - recursive_ = false; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.src_ = src_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.recursive_ = recursive_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto.getDefaultInstance()) return this; - if (other.hasSrc()) { - setSrc(other.getSrc()); - } - if (other.hasRecursive()) { - setRecursive(other.getRecursive()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasSrc()) { - - return false; - } - if (!hasRecursive()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - src_ = input.readBytes(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - recursive_ = input.readBool(); 
- break; - } - } - } - } - - private int bitField0_; - - // required string src = 1; - private java.lang.Object src_ = ""; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - src_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setSrc(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - return this; - } - public Builder clearSrc() { - bitField0_ = (bitField0_ & ~0x00000001); - src_ = getDefaultInstance().getSrc(); - onChanged(); - return this; - } - void setSrc(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - } - - // required bool recursive = 2; - private boolean recursive_ ; - public boolean hasRecursive() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public boolean getRecursive() { - return recursive_; - } - public Builder setRecursive(boolean value) { - bitField0_ |= 0x00000002; - recursive_ = value; - onChanged(); - return this; - } - public Builder clearRecursive() { - bitField0_ = (bitField0_ & ~0x00000002); - recursive_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:DeleteRequestProto) - } - - static { - defaultInstance = new DeleteRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:DeleteRequestProto) - } - - public interface DeleteResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required bool result = 1; - boolean hasResult(); - boolean getResult(); - } - public static final class DeleteResponseProto extends - com.google.protobuf.GeneratedMessage - implements DeleteResponseProtoOrBuilder { - // Use DeleteResponseProto.newBuilder() to construct. 
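// The generated DeleteRequestProto above corresponds to the
// ClientNamenodeProtocol.proto message
//   message DeleteRequestProto { required string src = 1;
//                                required bool recursive = 2; }
// (field shapes reconstructed from the generated field comments).
// A minimal round-trip sketch; the "/tmp/scratch" path is illustrative:
DeleteRequestProto req = DeleteRequestProto.newBuilder()
    .setSrc("/tmp/scratch")          // required: build() throws if unset
    .setRecursive(true)
    .build();
DeleteRequestProto decoded =
    DeleteRequestProto.parseFrom(req.toByteString());
assert decoded.getSrc().equals("/tmp/scratch") && decoded.getRecursive();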
- private DeleteResponseProto(Builder builder) { - super(builder); - } - private DeleteResponseProto(boolean noInit) {} - - private static final DeleteResponseProto defaultInstance; - public static DeleteResponseProto getDefaultInstance() { - return defaultInstance; - } - - public DeleteResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_DeleteResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_DeleteResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required bool result = 1; - public static final int RESULT_FIELD_NUMBER = 1; - private boolean result_; - public boolean hasResult() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public boolean getResult() { - return result_; - } - - private void initFields() { - result_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasResult()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(1, result_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(1, result_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto) obj; - - boolean result = true; - result = result && (hasResult() == other.hasResult()); - if (hasResult()) { - result = result && (getResult() - == other.getResult()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasResult()) { - hash = (37 * hash) + RESULT_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getResult()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException 
{ - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_DeleteResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_DeleteResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - result_ = false; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.result_ = result_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto other) { - if (other == 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.getDefaultInstance()) return this; - if (other.hasResult()) { - setResult(other.getResult()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasResult()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - result_ = input.readBool(); - break; - } - } - } - } - - private int bitField0_; - - // required bool result = 1; - private boolean result_ ; - public boolean hasResult() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public boolean getResult() { - return result_; - } - public Builder setResult(boolean value) { - bitField0_ |= 0x00000001; - result_ = value; - onChanged(); - return this; - } - public Builder clearResult() { - bitField0_ = (bitField0_ & ~0x00000001); - result_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:DeleteResponseProto) - } - - static { - defaultInstance = new DeleteResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:DeleteResponseProto) - } - - public interface MkdirsRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string src = 1; - boolean hasSrc(); - String getSrc(); - - // required .FsPermissionProto masked = 2; - boolean hasMasked(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getMasked(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getMaskedOrBuilder(); - - // required bool createParent = 3; - boolean hasCreateParent(); - boolean getCreateParent(); - } - public static final class MkdirsRequestProto extends - com.google.protobuf.GeneratedMessage - implements MkdirsRequestProtoOrBuilder { - // Use MkdirsRequestProto.newBuilder() to construct. 
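// DeleteResponseProto wraps the single "required bool result = 1" that
// the NameNode returns for a delete call. The parseDelimitedFrom
// overloads above return null on a clean end-of-stream, so a
// length-delimited stream of responses can be drained as sketched
// below ("in" is an assumed java.io.InputStream; writeDelimitedTo is
// the standard protobuf counterpart that frames each message with its
// length):
DeleteResponseProto msg;
while ((msg = DeleteResponseProto.parseDelimitedFrom(in)) != null) {
  System.out.println("delete succeeded: " + msg.getResult());
}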
- private MkdirsRequestProto(Builder builder) { - super(builder); - } - private MkdirsRequestProto(boolean noInit) {} - - private static final MkdirsRequestProto defaultInstance; - public static MkdirsRequestProto getDefaultInstance() { - return defaultInstance; - } - - public MkdirsRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_MkdirsRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_MkdirsRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string src = 1; - public static final int SRC_FIELD_NUMBER = 1; - private java.lang.Object src_; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - src_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getSrcBytes() { - java.lang.Object ref = src_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - src_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required .FsPermissionProto masked = 2; - public static final int MASKED_FIELD_NUMBER = 2; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto masked_; - public boolean hasMasked() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getMasked() { - return masked_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getMaskedOrBuilder() { - return masked_; - } - - // required bool createParent = 3; - public static final int CREATEPARENT_FIELD_NUMBER = 3; - private boolean createParent_; - public boolean hasCreateParent() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public boolean getCreateParent() { - return createParent_; - } - - private void initFields() { - src_ = ""; - masked_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); - createParent_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasSrc()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasMasked()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasCreateParent()) { - memoizedIsInitialized = 0; - return false; - } - if (!getMasked().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, masked_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - 
output.writeBool(3, createParent_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, masked_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(3, createParent_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto) obj; - - boolean result = true; - result = result && (hasSrc() == other.hasSrc()); - if (hasSrc()) { - result = result && getSrc() - .equals(other.getSrc()); - } - result = result && (hasMasked() == other.hasMasked()); - if (hasMasked()) { - result = result && getMasked() - .equals(other.getMasked()); - } - result = result && (hasCreateParent() == other.hasCreateParent()); - if (hasCreateParent()) { - result = result && (getCreateParent() - == other.getCreateParent()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasSrc()) { - hash = (37 * hash) + SRC_FIELD_NUMBER; - hash = (53 * hash) + getSrc().hashCode(); - } - if (hasMasked()) { - hash = (37 * hash) + MASKED_FIELD_NUMBER; - hash = (53 * hash) + getMasked().hashCode(); - } - if (hasCreateParent()) { - hash = (37 * hash) + CREATEPARENT_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getCreateParent()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto parseFrom( - byte[] data, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_MkdirsRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_MkdirsRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - 
} - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getMaskedFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - src_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - if (maskedBuilder_ == null) { - masked_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); - } else { - maskedBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - createParent_ = false; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.src_ = src_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - if (maskedBuilder_ == null) { - result.masked_ = masked_; - } else { - result.masked_ = maskedBuilder_.build(); - } - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.createParent_ = createParent_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto.getDefaultInstance()) return this; - if (other.hasSrc()) { - setSrc(other.getSrc()); - } - if (other.hasMasked()) { - mergeMasked(other.getMasked()); - } - if 
(other.hasCreateParent()) { - setCreateParent(other.getCreateParent()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasSrc()) { - - return false; - } - if (!hasMasked()) { - - return false; - } - if (!hasCreateParent()) { - - return false; - } - if (!getMasked().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - src_ = input.readBytes(); - break; - } - case 18: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.newBuilder(); - if (hasMasked()) { - subBuilder.mergeFrom(getMasked()); - } - input.readMessage(subBuilder, extensionRegistry); - setMasked(subBuilder.buildPartial()); - break; - } - case 24: { - bitField0_ |= 0x00000004; - createParent_ = input.readBool(); - break; - } - } - } - } - - private int bitField0_; - - // required string src = 1; - private java.lang.Object src_ = ""; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - src_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setSrc(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - return this; - } - public Builder clearSrc() { - bitField0_ = (bitField0_ & ~0x00000001); - src_ = getDefaultInstance().getSrc(); - onChanged(); - return this; - } - void setSrc(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - } - - // required .FsPermissionProto masked = 2; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto masked_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder> maskedBuilder_; - public boolean hasMasked() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getMasked() { - if (maskedBuilder_ == null) { - return masked_; - } else { - return maskedBuilder_.getMessage(); - } - } - public Builder setMasked(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto value) { - if (maskedBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - masked_ = value; - onChanged(); - } else { - maskedBuilder_.setMessage(value); - } - bitField0_ |= 0x00000002; - return 
this; - } - public Builder setMasked( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder builderForValue) { - if (maskedBuilder_ == null) { - masked_ = builderForValue.build(); - onChanged(); - } else { - maskedBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder mergeMasked(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto value) { - if (maskedBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && - masked_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance()) { - masked_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.newBuilder(masked_).mergeFrom(value).buildPartial(); - } else { - masked_ = value; - } - onChanged(); - } else { - maskedBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder clearMasked() { - if (maskedBuilder_ == null) { - masked_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); - onChanged(); - } else { - maskedBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder getMaskedBuilder() { - bitField0_ |= 0x00000002; - onChanged(); - return getMaskedFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getMaskedOrBuilder() { - if (maskedBuilder_ != null) { - return maskedBuilder_.getMessageOrBuilder(); - } else { - return masked_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder> - getMaskedFieldBuilder() { - if (maskedBuilder_ == null) { - maskedBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder>( - masked_, - getParentForChildren(), - isClean()); - masked_ = null; - } - return maskedBuilder_; - } - - // required bool createParent = 3; - private boolean createParent_ ; - public boolean hasCreateParent() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public boolean getCreateParent() { - return createParent_; - } - public Builder setCreateParent(boolean value) { - bitField0_ |= 0x00000004; - createParent_ = value; - onChanged(); - return this; - } - public Builder clearCreateParent() { - bitField0_ = (bitField0_ & ~0x00000004); - createParent_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:MkdirsRequestProto) - } - - static { - defaultInstance = new MkdirsRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:MkdirsRequestProto) - } - - public interface MkdirsResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required bool result = 1; - boolean hasResult(); - boolean getResult(); - } - public static final class MkdirsResponseProto extends - com.google.protobuf.GeneratedMessage - implements MkdirsResponseProtoOrBuilder { - // Use MkdirsResponseProto.newBuilder() to construct. 
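// MkdirsRequestProto is the first message in this group with a nested
// required message field ("required .FsPermissionProto masked = 2"),
// which is why its Builder grows a SingleFieldBuilder plus the
// set/merge/clearMasked family above. A sketch, assuming hdfs.proto's
// FsPermissionProto carries a single "required uint32 perm" field
// (setPerm below rests on that assumption):
MkdirsRequestProto mkdir = MkdirsRequestProto.newBuilder()
    .setSrc("/user/alice/out")       // illustrative path
    .setMasked(HdfsProtos.FsPermissionProto.newBuilder()
        .setPerm(0755)               // rwxr-xr-x; assumed field name
        .build())
    .setCreateParent(true)
    .build();                        // also checks getMasked().isInitialized()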
- private MkdirsResponseProto(Builder builder) { - super(builder); - } - private MkdirsResponseProto(boolean noInit) {} - - private static final MkdirsResponseProto defaultInstance; - public static MkdirsResponseProto getDefaultInstance() { - return defaultInstance; - } - - public MkdirsResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_MkdirsResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_MkdirsResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required bool result = 1; - public static final int RESULT_FIELD_NUMBER = 1; - private boolean result_; - public boolean hasResult() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public boolean getResult() { - return result_; - } - - private void initFields() { - result_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasResult()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(1, result_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(1, result_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto) obj; - - boolean result = true; - result = result && (hasResult() == other.hasResult()); - if (hasResult()) { - result = result && (getResult() - == other.getResult()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasResult()) { - hash = (37 * hash) + RESULT_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getResult()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException 
{ - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_MkdirsResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_MkdirsResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - result_ = false; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.result_ = result_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto other) { - if (other == 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.getDefaultInstance()) return this; - if (other.hasResult()) { - setResult(other.getResult()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasResult()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - result_ = input.readBool(); - break; - } - } - } - } - - private int bitField0_; - - // required bool result = 1; - private boolean result_ ; - public boolean hasResult() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public boolean getResult() { - return result_; - } - public Builder setResult(boolean value) { - bitField0_ |= 0x00000001; - result_ = value; - onChanged(); - return this; - } - public Builder clearResult() { - bitField0_ = (bitField0_ & ~0x00000001); - result_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:MkdirsResponseProto) - } - - static { - defaultInstance = new MkdirsResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:MkdirsResponseProto) - } - - public interface GetListingRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string src = 1; - boolean hasSrc(); - String getSrc(); - - // required bytes startAfter = 2; - boolean hasStartAfter(); - com.google.protobuf.ByteString getStartAfter(); - - // required bool needLocation = 3; - boolean hasNeedLocation(); - boolean getNeedLocation(); - } - public static final class GetListingRequestProto extends - com.google.protobuf.GeneratedMessage - implements GetListingRequestProtoOrBuilder { - // Use GetListingRequestProto.newBuilder() to construct. 
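// MkdirsResponseProto, like the other generated messages here, is
// immutable once built; toBuilder() is the supported way to derive a
// modified copy. A small sketch of that pattern:
MkdirsResponseProto created = MkdirsResponseProto.newBuilder()
    .setResult(true)
    .build();
MkdirsResponseProto failed = created.toBuilder()
    .setResult(false)
    .build();                        // "created" itself is left untouched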
- private GetListingRequestProto(Builder builder) { - super(builder); - } - private GetListingRequestProto(boolean noInit) {} - - private static final GetListingRequestProto defaultInstance; - public static GetListingRequestProto getDefaultInstance() { - return defaultInstance; - } - - public GetListingRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetListingRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetListingRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string src = 1; - public static final int SRC_FIELD_NUMBER = 1; - private java.lang.Object src_; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - src_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getSrcBytes() { - java.lang.Object ref = src_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - src_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required bytes startAfter = 2; - public static final int STARTAFTER_FIELD_NUMBER = 2; - private com.google.protobuf.ByteString startAfter_; - public boolean hasStartAfter() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public com.google.protobuf.ByteString getStartAfter() { - return startAfter_; - } - - // required bool needLocation = 3; - public static final int NEEDLOCATION_FIELD_NUMBER = 3; - private boolean needLocation_; - public boolean hasNeedLocation() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public boolean getNeedLocation() { - return needLocation_; - } - - private void initFields() { - src_ = ""; - startAfter_ = com.google.protobuf.ByteString.EMPTY; - needLocation_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasSrc()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasStartAfter()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasNeedLocation()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, startAfter_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBool(3, needLocation_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += 
com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, startAfter_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(3, needLocation_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto) obj; - - boolean result = true; - result = result && (hasSrc() == other.hasSrc()); - if (hasSrc()) { - result = result && getSrc() - .equals(other.getSrc()); - } - result = result && (hasStartAfter() == other.hasStartAfter()); - if (hasStartAfter()) { - result = result && getStartAfter() - .equals(other.getStartAfter()); - } - result = result && (hasNeedLocation() == other.hasNeedLocation()); - if (hasNeedLocation()) { - result = result && (getNeedLocation() - == other.getNeedLocation()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasSrc()) { - hash = (37 * hash) + SRC_FIELD_NUMBER; - hash = (53 * hash) + getSrc().hashCode(); - } - if (hasStartAfter()) { - hash = (37 * hash) + STARTAFTER_FIELD_NUMBER; - hash = (53 * hash) + getStartAfter().hashCode(); - } - if (hasNeedLocation()) { - hash = (37 * hash) + NEEDLOCATION_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getNeedLocation()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetListingRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetListingRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return 
new Builder(); - } - - public Builder clear() { - super.clear(); - src_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - startAfter_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000002); - needLocation_ = false; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.src_ = src_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.startAfter_ = startAfter_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.needLocation_ = needLocation_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto.getDefaultInstance()) return this; - if (other.hasSrc()) { - setSrc(other.getSrc()); - } - if (other.hasStartAfter()) { - setStartAfter(other.getStartAfter()); - } - if (other.hasNeedLocation()) { - setNeedLocation(other.getNeedLocation()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasSrc()) { - - return false; - } - if (!hasStartAfter()) { - - return false; - } - if (!hasNeedLocation()) { - - return false; - } - return true; - } - - 
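-      /* A minimal usage sketch, not part of the generated file: the schema
-         visible in the field comments above is src = 1 (required string),
-         startAfter = 2 (required bytes), needLocation = 3 (required bool),
-         so a caller would populate a listing request roughly as follows
-         (the path value is hypothetical):
-
-           GetListingRequestProto req = GetListingRequestProto.newBuilder()
-               .setSrc("/user/example")
-               .setStartAfter(com.google.protobuf.ByteString.EMPTY)
-               .setNeedLocation(true)
-               .build();
-
-         build() (shown earlier in this message class) delegates to
-         buildPartial() and throws newUninitializedMessageException when the
-         isInitialized() check directly above reports an unset required
-         field. */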
public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - src_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - startAfter_ = input.readBytes(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - needLocation_ = input.readBool(); - break; - } - } - } - } - - private int bitField0_; - - // required string src = 1; - private java.lang.Object src_ = ""; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - src_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setSrc(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - return this; - } - public Builder clearSrc() { - bitField0_ = (bitField0_ & ~0x00000001); - src_ = getDefaultInstance().getSrc(); - onChanged(); - return this; - } - void setSrc(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - } - - // required bytes startAfter = 2; - private com.google.protobuf.ByteString startAfter_ = com.google.protobuf.ByteString.EMPTY; - public boolean hasStartAfter() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public com.google.protobuf.ByteString getStartAfter() { - return startAfter_; - } - public Builder setStartAfter(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - startAfter_ = value; - onChanged(); - return this; - } - public Builder clearStartAfter() { - bitField0_ = (bitField0_ & ~0x00000002); - startAfter_ = getDefaultInstance().getStartAfter(); - onChanged(); - return this; - } - - // required bool needLocation = 3; - private boolean needLocation_ ; - public boolean hasNeedLocation() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public boolean getNeedLocation() { - return needLocation_; - } - public Builder setNeedLocation(boolean value) { - bitField0_ |= 0x00000004; - needLocation_ = value; - onChanged(); - return this; - } - public Builder clearNeedLocation() { - bitField0_ = (bitField0_ & ~0x00000004); - needLocation_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:GetListingRequestProto) - } - - static { - defaultInstance = new GetListingRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetListingRequestProto) - } - - public interface GetListingResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .DirectoryListingProto dirList = 1; - boolean hasDirList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto getDirList(); - 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProtoOrBuilder getDirListOrBuilder(); - } - public static final class GetListingResponseProto extends - com.google.protobuf.GeneratedMessage - implements GetListingResponseProtoOrBuilder { - // Use GetListingResponseProto.newBuilder() to construct. - private GetListingResponseProto(Builder builder) { - super(builder); - } - private GetListingResponseProto(boolean noInit) {} - - private static final GetListingResponseProto defaultInstance; - public static GetListingResponseProto getDefaultInstance() { - return defaultInstance; - } - - public GetListingResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetListingResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetListingResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required .DirectoryListingProto dirList = 1; - public static final int DIRLIST_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto dirList_; - public boolean hasDirList() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto getDirList() { - return dirList_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProtoOrBuilder getDirListOrBuilder() { - return dirList_; - } - - private void initFields() { - dirList_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasDirList()) { - memoizedIsInitialized = 0; - return false; - } - if (!getDirList().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, dirList_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, dirList_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto other = 
(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto) obj; - - boolean result = true; - result = result && (hasDirList() == other.hasDirList()); - if (hasDirList()) { - result = result && getDirList() - .equals(other.getDirList()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasDirList()) { - hash = (37 * hash) + DIRLIST_FIELD_NUMBER; - hash = (53 * hash) + getDirList().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetListingResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetListingResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getDirListFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (dirListBuilder_ == null) { - dirList_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDefaultInstance(); - } else { - dirListBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto result = buildPartial(); - if 
(!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (dirListBuilder_ == null) { - result.dirList_ = dirList_; - } else { - result.dirList_ = dirListBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.getDefaultInstance()) return this; - if (other.hasDirList()) { - mergeDirList(other.getDirList()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasDirList()) { - - return false; - } - if (!getDirList().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.newBuilder(); - if (hasDirList()) { - subBuilder.mergeFrom(getDirList()); - } - input.readMessage(subBuilder, extensionRegistry); - setDirList(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .DirectoryListingProto dirList = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto dirList_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProtoOrBuilder> dirListBuilder_; - public boolean hasDirList() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto getDirList() { - if (dirListBuilder_ == null) { - 
return dirList_; - } else { - return dirListBuilder_.getMessage(); - } - } - public Builder setDirList(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto value) { - if (dirListBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - dirList_ = value; - onChanged(); - } else { - dirListBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setDirList( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder builderForValue) { - if (dirListBuilder_ == null) { - dirList_ = builderForValue.build(); - onChanged(); - } else { - dirListBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeDirList(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto value) { - if (dirListBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - dirList_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDefaultInstance()) { - dirList_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.newBuilder(dirList_).mergeFrom(value).buildPartial(); - } else { - dirList_ = value; - } - onChanged(); - } else { - dirListBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearDirList() { - if (dirListBuilder_ == null) { - dirList_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDefaultInstance(); - onChanged(); - } else { - dirListBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder getDirListBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getDirListFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProtoOrBuilder getDirListOrBuilder() { - if (dirListBuilder_ != null) { - return dirListBuilder_.getMessageOrBuilder(); - } else { - return dirList_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProtoOrBuilder> - getDirListFieldBuilder() { - if (dirListBuilder_ == null) { - dirListBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProtoOrBuilder>( - dirList_, - getParentForChildren(), - isClean()); - dirList_ = null; - } - return dirListBuilder_; - } - - // @@protoc_insertion_point(builder_scope:GetListingResponseProto) - } - - static { - defaultInstance = new GetListingResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetListingResponseProto) - } - - public interface RenewLeaseRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string clientName = 1; - boolean hasClientName(); - String getClientName(); - } - public static final class RenewLeaseRequestProto extends - com.google.protobuf.GeneratedMessage - implements RenewLeaseRequestProtoOrBuilder { - // Use RenewLeaseRequestProto.newBuilder() to construct. 
- private RenewLeaseRequestProto(Builder builder) { - super(builder); - } - private RenewLeaseRequestProto(boolean noInit) {} - - private static final RenewLeaseRequestProto defaultInstance; - public static RenewLeaseRequestProto getDefaultInstance() { - return defaultInstance; - } - - public RenewLeaseRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RenewLeaseRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RenewLeaseRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string clientName = 1; - public static final int CLIENTNAME_FIELD_NUMBER = 1; - private java.lang.Object clientName_; - public boolean hasClientName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getClientName() { - java.lang.Object ref = clientName_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - clientName_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getClientNameBytes() { - java.lang.Object ref = clientName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - clientName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - clientName_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasClientName()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getClientNameBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getClientNameBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto) obj; - - boolean result = true; - result = result && (hasClientName() == 
other.hasClientName()); - if (hasClientName()) { - result = result && getClientName() - .equals(other.getClientName()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasClientName()) { - hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER; - hash = (53 * hash) + getClientName().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RenewLeaseRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RenewLeaseRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - clientName_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto result = new 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.clientName_ = clientName_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto.getDefaultInstance()) return this; - if (other.hasClientName()) { - setClientName(other.getClientName()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasClientName()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - clientName_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required string clientName = 1; - private java.lang.Object clientName_ = ""; - public boolean hasClientName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getClientName() { - java.lang.Object ref = clientName_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - clientName_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setClientName(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - clientName_ = value; - onChanged(); - return this; - } - public Builder clearClientName() { - bitField0_ = (bitField0_ & ~0x00000001); - clientName_ = getDefaultInstance().getClientName(); - onChanged(); - return this; - } - void setClientName(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - clientName_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:RenewLeaseRequestProto) - } - - static { - defaultInstance = new RenewLeaseRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:RenewLeaseRequestProto) - } - - public interface RenewLeaseResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class RenewLeaseResponseProto extends - com.google.protobuf.GeneratedMessage - implements RenewLeaseResponseProtoOrBuilder { - // Use RenewLeaseResponseProto.newBuilder() to construct. 
- private RenewLeaseResponseProto(Builder builder) { - super(builder); - } - private RenewLeaseResponseProto(boolean noInit) {} - - private static final RenewLeaseResponseProto defaultInstance; - public static RenewLeaseResponseProto getDefaultInstance() { - return defaultInstance; - } - - public RenewLeaseResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RenewLeaseResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RenewLeaseResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto parseFrom( - byte[] data, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RenewLeaseResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RenewLeaseResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - 
super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:RenewLeaseResponseProto) - } - - static { - defaultInstance = new 
RenewLeaseResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:RenewLeaseResponseProto) - } - - public interface RecoverLeaseRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string src = 1; - boolean hasSrc(); - String getSrc(); - - // required string clientName = 2; - boolean hasClientName(); - String getClientName(); - } - public static final class RecoverLeaseRequestProto extends - com.google.protobuf.GeneratedMessage - implements RecoverLeaseRequestProtoOrBuilder { - // Use RecoverLeaseRequestProto.newBuilder() to construct. - private RecoverLeaseRequestProto(Builder builder) { - super(builder); - } - private RecoverLeaseRequestProto(boolean noInit) {} - - private static final RecoverLeaseRequestProto defaultInstance; - public static RecoverLeaseRequestProto getDefaultInstance() { - return defaultInstance; - } - - public RecoverLeaseRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RecoverLeaseRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RecoverLeaseRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string src = 1; - public static final int SRC_FIELD_NUMBER = 1; - private java.lang.Object src_; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - src_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getSrcBytes() { - java.lang.Object ref = src_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - src_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string clientName = 2; - public static final int CLIENTNAME_FIELD_NUMBER = 2; - private java.lang.Object clientName_; - public boolean hasClientName() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getClientName() { - java.lang.Object ref = clientName_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - clientName_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getClientNameBytes() { - java.lang.Object ref = clientName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - clientName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - src_ = ""; - clientName_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasSrc()) { - memoizedIsInitialized = 0; - return false; - } - if 
(!hasClientName()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getClientNameBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getClientNameBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto) obj; - - boolean result = true; - result = result && (hasSrc() == other.hasSrc()); - if (hasSrc()) { - result = result && getSrc() - .equals(other.getSrc()); - } - result = result && (hasClientName() == other.hasClientName()); - if (hasClientName()) { - result = result && getClientName() - .equals(other.getClientName()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasSrc()) { - hash = (37 * hash) + SRC_FIELD_NUMBER; - hash = (53 * hash) + getSrc().hashCode(); - } - if (hasClientName()) { - hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER; - hash = (53 * hash) + getClientName().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto 
parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RecoverLeaseRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RecoverLeaseRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - 
private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - src_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - clientName_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.src_ = src_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.clientName_ = clientName_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto.getDefaultInstance()) return this; - if (other.hasSrc()) { - setSrc(other.getSrc()); - } - if (other.hasClientName()) { - setClientName(other.getClientName()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasSrc()) { - - return false; - } - if (!hasClientName()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream 
input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - src_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - clientName_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required string src = 1; - private java.lang.Object src_ = ""; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - src_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setSrc(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - return this; - } - public Builder clearSrc() { - bitField0_ = (bitField0_ & ~0x00000001); - src_ = getDefaultInstance().getSrc(); - onChanged(); - return this; - } - void setSrc(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - } - - // required string clientName = 2; - private java.lang.Object clientName_ = ""; - public boolean hasClientName() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getClientName() { - java.lang.Object ref = clientName_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - clientName_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setClientName(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - clientName_ = value; - onChanged(); - return this; - } - public Builder clearClientName() { - bitField0_ = (bitField0_ & ~0x00000002); - clientName_ = getDefaultInstance().getClientName(); - onChanged(); - return this; - } - void setClientName(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000002; - clientName_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:RecoverLeaseRequestProto) - } - - static { - defaultInstance = new RecoverLeaseRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:RecoverLeaseRequestProto) - } - - public interface RecoverLeaseResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required bool result = 1; - boolean hasResult(); - boolean getResult(); - } - public static final class RecoverLeaseResponseProto extends - com.google.protobuf.GeneratedMessage - implements RecoverLeaseResponseProtoOrBuilder { - // Use RecoverLeaseResponseProto.newBuilder() to construct. 
- private RecoverLeaseResponseProto(Builder builder) { - super(builder); - } - private RecoverLeaseResponseProto(boolean noInit) {} - - private static final RecoverLeaseResponseProto defaultInstance; - public static RecoverLeaseResponseProto getDefaultInstance() { - return defaultInstance; - } - - public RecoverLeaseResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RecoverLeaseResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RecoverLeaseResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required bool result = 1; - public static final int RESULT_FIELD_NUMBER = 1; - private boolean result_; - public boolean hasResult() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public boolean getResult() { - return result_; - } - - private void initFields() { - result_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasResult()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(1, result_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(1, result_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto) obj; - - boolean result = true; - result = result && (hasResult() == other.hasResult()); - if (hasResult()) { - result = result && (getResult() - == other.getResult()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasResult()) { - hash = (37 * hash) + RESULT_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getResult()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto parseFrom( - com.google.protobuf.ByteString 
data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public 
static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RecoverLeaseResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RecoverLeaseResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - result_ = false; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.result_ = result_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } 
- } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.getDefaultInstance()) return this; - if (other.hasResult()) { - setResult(other.getResult()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasResult()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - result_ = input.readBool(); - break; - } - } - } - } - - private int bitField0_; - - // required bool result = 1; - private boolean result_ ; - public boolean hasResult() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public boolean getResult() { - return result_; - } - public Builder setResult(boolean value) { - bitField0_ |= 0x00000001; - result_ = value; - onChanged(); - return this; - } - public Builder clearResult() { - bitField0_ = (bitField0_ & ~0x00000001); - result_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:RecoverLeaseResponseProto) - } - - static { - defaultInstance = new RecoverLeaseResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:RecoverLeaseResponseProto) - } - - public interface GetFsStatusRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class GetFsStatusRequestProto extends - com.google.protobuf.GeneratedMessage - implements GetFsStatusRequestProtoOrBuilder { - // Use GetFsStatusRequestProto.newBuilder() to construct. 
- private GetFsStatusRequestProto(Builder builder) { - super(builder); - } - private GetFsStatusRequestProto(boolean noInit) {} - - private static final GetFsStatusRequestProto defaultInstance; - public static GetFsStatusRequestProto getDefaultInstance() { - return defaultInstance; - } - - public GetFsStatusRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetFsStatusRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetFsStatusRequestProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto parseFrom( - byte[] data, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetFsStatusRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetFsStatusRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - 
super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:GetFsStatusRequestProto) - } - - static { - defaultInstance = new 
GetFsStatusRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetFsStatusRequestProto) - } - - public interface GetFsStatsResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required uint64 capacity = 1; - boolean hasCapacity(); - long getCapacity(); - - // required uint64 used = 2; - boolean hasUsed(); - long getUsed(); - - // required uint64 remaining = 3; - boolean hasRemaining(); - long getRemaining(); - - // required uint64 under_replicated = 4; - boolean hasUnderReplicated(); - long getUnderReplicated(); - - // required uint64 corrupt_blocks = 5; - boolean hasCorruptBlocks(); - long getCorruptBlocks(); - - // required uint64 missing_blocks = 6; - boolean hasMissingBlocks(); - long getMissingBlocks(); - } - public static final class GetFsStatsResponseProto extends - com.google.protobuf.GeneratedMessage - implements GetFsStatsResponseProtoOrBuilder { - // Use GetFsStatsResponseProto.newBuilder() to construct. - private GetFsStatsResponseProto(Builder builder) { - super(builder); - } - private GetFsStatsResponseProto(boolean noInit) {} - - private static final GetFsStatsResponseProto defaultInstance; - public static GetFsStatsResponseProto getDefaultInstance() { - return defaultInstance; - } - - public GetFsStatsResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetFsStatsResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetFsStatsResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required uint64 capacity = 1; - public static final int CAPACITY_FIELD_NUMBER = 1; - private long capacity_; - public boolean hasCapacity() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getCapacity() { - return capacity_; - } - - // required uint64 used = 2; - public static final int USED_FIELD_NUMBER = 2; - private long used_; - public boolean hasUsed() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getUsed() { - return used_; - } - - // required uint64 remaining = 3; - public static final int REMAINING_FIELD_NUMBER = 3; - private long remaining_; - public boolean hasRemaining() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getRemaining() { - return remaining_; - } - - // required uint64 under_replicated = 4; - public static final int UNDER_REPLICATED_FIELD_NUMBER = 4; - private long underReplicated_; - public boolean hasUnderReplicated() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public long getUnderReplicated() { - return underReplicated_; - } - - // required uint64 corrupt_blocks = 5; - public static final int CORRUPT_BLOCKS_FIELD_NUMBER = 5; - private long corruptBlocks_; - public boolean hasCorruptBlocks() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - public long getCorruptBlocks() { - return corruptBlocks_; - } - - // required uint64 missing_blocks = 6; - public static final int MISSING_BLOCKS_FIELD_NUMBER = 6; - private long missingBlocks_; - public boolean hasMissingBlocks() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - public long getMissingBlocks() { - return missingBlocks_; - } - - private void initFields() { - 
capacity_ = 0L; - used_ = 0L; - remaining_ = 0L; - underReplicated_ = 0L; - corruptBlocks_ = 0L; - missingBlocks_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasCapacity()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasUsed()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasRemaining()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasUnderReplicated()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasCorruptBlocks()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasMissingBlocks()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt64(1, capacity_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, used_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt64(3, remaining_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeUInt64(4, underReplicated_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeUInt64(5, corruptBlocks_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeUInt64(6, missingBlocks_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(1, capacity_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, used_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(3, remaining_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(4, underReplicated_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(5, corruptBlocks_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(6, missingBlocks_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto) obj; - - boolean result = true; - result = result && (hasCapacity() == other.hasCapacity()); - if (hasCapacity()) { - result = result && (getCapacity() - == other.getCapacity()); - } - result = result && (hasUsed() == other.hasUsed()); - if 
(hasUsed()) { - result = result && (getUsed() - == other.getUsed()); - } - result = result && (hasRemaining() == other.hasRemaining()); - if (hasRemaining()) { - result = result && (getRemaining() - == other.getRemaining()); - } - result = result && (hasUnderReplicated() == other.hasUnderReplicated()); - if (hasUnderReplicated()) { - result = result && (getUnderReplicated() - == other.getUnderReplicated()); - } - result = result && (hasCorruptBlocks() == other.hasCorruptBlocks()); - if (hasCorruptBlocks()) { - result = result && (getCorruptBlocks() - == other.getCorruptBlocks()); - } - result = result && (hasMissingBlocks() == other.hasMissingBlocks()); - if (hasMissingBlocks()) { - result = result && (getMissingBlocks() - == other.getMissingBlocks()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasCapacity()) { - hash = (37 * hash) + CAPACITY_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getCapacity()); - } - if (hasUsed()) { - hash = (37 * hash) + USED_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getUsed()); - } - if (hasRemaining()) { - hash = (37 * hash) + REMAINING_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getRemaining()); - } - if (hasUnderReplicated()) { - hash = (37 * hash) + UNDER_REPLICATED_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getUnderReplicated()); - } - if (hasCorruptBlocks()) { - hash = (37 * hash) + CORRUPT_BLOCKS_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getCorruptBlocks()); - } - if (hasMissingBlocks()) { - hash = (37 * hash) + MISSING_BLOCKS_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getMissingBlocks()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return 
newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetFsStatsResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetFsStatsResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - capacity_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - used_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - remaining_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - underReplicated_ = 0L; - bitField0_ = (bitField0_ & ~0x00000008); - corruptBlocks_ = 0L; - bitField0_ = (bitField0_ & ~0x00000010); - missingBlocks_ = 0L; - bitField0_ = (bitField0_ & ~0x00000020); - return 
this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.capacity_ = capacity_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.used_ = used_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.remaining_ = remaining_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.underReplicated_ = underReplicated_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.corruptBlocks_ = corruptBlocks_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; - } - result.missingBlocks_ = missingBlocks_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.getDefaultInstance()) return this; - if (other.hasCapacity()) { - setCapacity(other.getCapacity()); - } - if (other.hasUsed()) { - setUsed(other.getUsed()); - } - if (other.hasRemaining()) { - setRemaining(other.getRemaining()); - } - if (other.hasUnderReplicated()) { - setUnderReplicated(other.getUnderReplicated()); - } - if (other.hasCorruptBlocks()) { - setCorruptBlocks(other.getCorruptBlocks()); - } 
- if (other.hasMissingBlocks()) { - setMissingBlocks(other.getMissingBlocks()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasCapacity()) { - - return false; - } - if (!hasUsed()) { - - return false; - } - if (!hasRemaining()) { - - return false; - } - if (!hasUnderReplicated()) { - - return false; - } - if (!hasCorruptBlocks()) { - - return false; - } - if (!hasMissingBlocks()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - capacity_ = input.readUInt64(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - used_ = input.readUInt64(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - remaining_ = input.readUInt64(); - break; - } - case 32: { - bitField0_ |= 0x00000008; - underReplicated_ = input.readUInt64(); - break; - } - case 40: { - bitField0_ |= 0x00000010; - corruptBlocks_ = input.readUInt64(); - break; - } - case 48: { - bitField0_ |= 0x00000020; - missingBlocks_ = input.readUInt64(); - break; - } - } - } - } - - private int bitField0_; - - // required uint64 capacity = 1; - private long capacity_ ; - public boolean hasCapacity() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getCapacity() { - return capacity_; - } - public Builder setCapacity(long value) { - bitField0_ |= 0x00000001; - capacity_ = value; - onChanged(); - return this; - } - public Builder clearCapacity() { - bitField0_ = (bitField0_ & ~0x00000001); - capacity_ = 0L; - onChanged(); - return this; - } - - // required uint64 used = 2; - private long used_ ; - public boolean hasUsed() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getUsed() { - return used_; - } - public Builder setUsed(long value) { - bitField0_ |= 0x00000002; - used_ = value; - onChanged(); - return this; - } - public Builder clearUsed() { - bitField0_ = (bitField0_ & ~0x00000002); - used_ = 0L; - onChanged(); - return this; - } - - // required uint64 remaining = 3; - private long remaining_ ; - public boolean hasRemaining() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getRemaining() { - return remaining_; - } - public Builder setRemaining(long value) { - bitField0_ |= 0x00000004; - remaining_ = value; - onChanged(); - return this; - } - public Builder clearRemaining() { - bitField0_ = (bitField0_ & ~0x00000004); - remaining_ = 0L; - onChanged(); - return this; - } - - // required uint64 under_replicated = 4; - private long underReplicated_ ; - public boolean hasUnderReplicated() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public long getUnderReplicated() { - return underReplicated_; - } - public Builder setUnderReplicated(long value) { - bitField0_ |= 0x00000008; - underReplicated_ = value; - onChanged(); - return this; - } - public Builder clearUnderReplicated() { - bitField0_ = (bitField0_ & 
~0x00000008); - underReplicated_ = 0L; - onChanged(); - return this; - } - - // required uint64 corrupt_blocks = 5; - private long corruptBlocks_ ; - public boolean hasCorruptBlocks() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - public long getCorruptBlocks() { - return corruptBlocks_; - } - public Builder setCorruptBlocks(long value) { - bitField0_ |= 0x00000010; - corruptBlocks_ = value; - onChanged(); - return this; - } - public Builder clearCorruptBlocks() { - bitField0_ = (bitField0_ & ~0x00000010); - corruptBlocks_ = 0L; - onChanged(); - return this; - } - - // required uint64 missing_blocks = 6; - private long missingBlocks_ ; - public boolean hasMissingBlocks() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - public long getMissingBlocks() { - return missingBlocks_; - } - public Builder setMissingBlocks(long value) { - bitField0_ |= 0x00000020; - missingBlocks_ = value; - onChanged(); - return this; - } - public Builder clearMissingBlocks() { - bitField0_ = (bitField0_ & ~0x00000020); - missingBlocks_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:GetFsStatsResponseProto) - } - - static { - defaultInstance = new GetFsStatsResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetFsStatsResponseProto) - } - - public interface GetDatanodeReportRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .DatanodeReportType type = 1; - boolean hasType(); - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportType getType(); - } - public static final class GetDatanodeReportRequestProto extends - com.google.protobuf.GeneratedMessage - implements GetDatanodeReportRequestProtoOrBuilder { - // Use GetDatanodeReportRequestProto.newBuilder() to construct. 
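For orientation while reading the deleted generated code: GetFsStatsResponseProto, which closed just above, carries the six required uint64 counters of the filesystem summary (capacity, used, remaining, under_replicated, corrupt_blocks, missing_blocks). Below is a minimal round-trip sketch against the generated API visible in this hunk; the numeric values are placeholders, and toByteString() is the standard protobuf-java serializer rather than anything declared in this file:

    import com.google.protobuf.InvalidProtocolBufferException;
    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;

    public class FsStatsRoundTrip {
      public static void main(String[] args) throws InvalidProtocolBufferException {
        // Every field is 'required'; build() throws if any of the six is unset.
        GetFsStatsResponseProto stats = GetFsStatsResponseProto.newBuilder()
            .setCapacity(100L)          // placeholder byte counts
            .setUsed(40L)
            .setRemaining(60L)
            .setUnderReplicated(0L)
            .setCorruptBlocks(0L)
            .setMissingBlocks(0L)
            .build();
        // parseFrom(ByteString) is one of the generated overloads above.
        GetFsStatsResponseProto copy =
            GetFsStatsResponseProto.parseFrom(stats.toByteString());
        assert copy.getUsed() + copy.getRemaining() == copy.getCapacity();
      }
    }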
- private GetDatanodeReportRequestProto(Builder builder) { - super(builder); - } - private GetDatanodeReportRequestProto(boolean noInit) {} - - private static final GetDatanodeReportRequestProto defaultInstance; - public static GetDatanodeReportRequestProto getDefaultInstance() { - return defaultInstance; - } - - public GetDatanodeReportRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetDatanodeReportRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetDatanodeReportRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required .DatanodeReportType type = 1; - public static final int TYPE_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportType type_; - public boolean hasType() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportType getType() { - return type_; - } - - private void initFields() { - type_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportType.ALL; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasType()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(1, type_.getNumber()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, type_.getNumber()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto) obj; - - boolean result = true; - result = result && (hasType() == other.hasType()); - if (hasType()) { - result = result && - (getType() == other.getType()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasType()) { - hash = (37 * hash) + 
TYPE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getType()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder 
newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetDatanodeReportRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetDatanodeReportRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - type_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportType.ALL; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto(this); - int from_bitField0_ = bitField0_; - int 
to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.type_ = type_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto.getDefaultInstance()) return this; - if (other.hasType()) { - setType(other.getType()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasType()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - int rawValue = input.readEnum(); - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportType value = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportType.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - bitField0_ |= 0x00000001; - type_ = value; - } - break; - } - } - } - } - - private int bitField0_; - - // required .DatanodeReportType type = 1; - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportType type_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportType.ALL; - public boolean hasType() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportType getType() { - return type_; - } - public Builder setType(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportType value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - type_ = value; - onChanged(); - return this; - } - public Builder clearType() { - bitField0_ = (bitField0_ & ~0x00000001); - type_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportType.ALL; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:GetDatanodeReportRequestProto) - } - - static { - defaultInstance = new GetDatanodeReportRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetDatanodeReportRequestProto) - } - - public interface GetDatanodeReportResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // repeated .DatanodeInfoProto di = 1; - java.util.List - getDiList(); - 
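The request side of the datanode report is a single required enum field. A short sketch, assuming only the DatanodeReportType.ALL constant that this hunk itself uses as the field default in initFields():

    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportType;
    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;

    public class DatanodeReportRequest {
      public static void main(String[] args) {
        // ALL is also what initFields() assigns, so this asks for the default report.
        GetDatanodeReportRequestProto req = GetDatanodeReportRequestProto.newBuilder()
            .setType(DatanodeReportType.ALL)
            .build();
        assert req.hasType();
      }
    }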
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDi(int index); - int getDiCount(); - java.util.List - getDiOrBuilderList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDiOrBuilder( - int index); - } - public static final class GetDatanodeReportResponseProto extends - com.google.protobuf.GeneratedMessage - implements GetDatanodeReportResponseProtoOrBuilder { - // Use GetDatanodeReportResponseProto.newBuilder() to construct. - private GetDatanodeReportResponseProto(Builder builder) { - super(builder); - } - private GetDatanodeReportResponseProto(boolean noInit) {} - - private static final GetDatanodeReportResponseProto defaultInstance; - public static GetDatanodeReportResponseProto getDefaultInstance() { - return defaultInstance; - } - - public GetDatanodeReportResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetDatanodeReportResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetDatanodeReportResponseProto_fieldAccessorTable; - } - - // repeated .DatanodeInfoProto di = 1; - public static final int DI_FIELD_NUMBER = 1; - private java.util.List di_; - public java.util.List getDiList() { - return di_; - } - public java.util.List - getDiOrBuilderList() { - return di_; - } - public int getDiCount() { - return di_.size(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDi(int index) { - return di_.get(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDiOrBuilder( - int index) { - return di_.get(index); - } - - private void initFields() { - di_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - for (int i = 0; i < getDiCount(); i++) { - if (!getDi(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < di_.size(); i++) { - output.writeMessage(1, di_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - for (int i = 0; i < di_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, di_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto)) { - return super.equals(obj); - } - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto) obj; - - boolean result = true; - result = result && getDiList() - .equals(other.getDiList()); - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getDiCount() > 0) { - hash = (37 * hash) + DI_FIELD_NUMBER; - hash = (53 * hash) + getDiList().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - 
public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetDatanodeReportResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetDatanodeReportResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getDiFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (diBuilder_ == null) { - di_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - } else { - diBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto result = 
buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto(this); - int from_bitField0_ = bitField0_; - if (diBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - di_ = java.util.Collections.unmodifiableList(di_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.di_ = di_; - } else { - result.di_ = diBuilder_.build(); - } - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.getDefaultInstance()) return this; - if (diBuilder_ == null) { - if (!other.di_.isEmpty()) { - if (di_.isEmpty()) { - di_ = other.di_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureDiIsMutable(); - di_.addAll(other.di_); - } - onChanged(); - } - } else { - if (!other.di_.isEmpty()) { - if (diBuilder_.isEmpty()) { - diBuilder_.dispose(); - diBuilder_ = null; - di_ = other.di_; - bitField0_ = (bitField0_ & ~0x00000001); - diBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getDiFieldBuilder() : null; - } else { - diBuilder_.addAllMessages(other.di_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - for (int i = 0; i < getDiCount(); i++) { - if (!getDi(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addDi(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // repeated .DatanodeInfoProto di = 1; - private java.util.List di_ = - java.util.Collections.emptyList(); - private void ensureDiIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - di_ = new java.util.ArrayList(di_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> diBuilder_; - - public java.util.List getDiList() { - if (diBuilder_ == null) { - return java.util.Collections.unmodifiableList(di_); - } else { - return diBuilder_.getMessageList(); - } - } - public int getDiCount() { - if (diBuilder_ == null) { - return di_.size(); - } else { - return diBuilder_.getCount(); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDi(int index) { - if (diBuilder_ == null) { - return di_.get(index); - } else { - return diBuilder_.getMessage(index); - } - } - public Builder setDi( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (diBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureDiIsMutable(); - di_.set(index, value); - onChanged(); - } else { - diBuilder_.setMessage(index, value); - } - return this; - } - public Builder setDi( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { - if (diBuilder_ == null) { - ensureDiIsMutable(); - di_.set(index, builderForValue.build()); - onChanged(); - } else { - diBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - public Builder addDi(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (diBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureDiIsMutable(); - di_.add(value); - onChanged(); - } else { - diBuilder_.addMessage(value); - } - return this; - } - public Builder addDi( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (diBuilder_ == null) { - if (value == null) { - throw new 
NullPointerException(); - } - ensureDiIsMutable(); - di_.add(index, value); - onChanged(); - } else { - diBuilder_.addMessage(index, value); - } - return this; - } - public Builder addDi( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { - if (diBuilder_ == null) { - ensureDiIsMutable(); - di_.add(builderForValue.build()); - onChanged(); - } else { - diBuilder_.addMessage(builderForValue.build()); - } - return this; - } - public Builder addDi( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { - if (diBuilder_ == null) { - ensureDiIsMutable(); - di_.add(index, builderForValue.build()); - onChanged(); - } else { - diBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - public Builder addAllDi( - java.lang.Iterable values) { - if (diBuilder_ == null) { - ensureDiIsMutable(); - super.addAll(values, di_); - onChanged(); - } else { - diBuilder_.addAllMessages(values); - } - return this; - } - public Builder clearDi() { - if (diBuilder_ == null) { - di_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - diBuilder_.clear(); - } - return this; - } - public Builder removeDi(int index) { - if (diBuilder_ == null) { - ensureDiIsMutable(); - di_.remove(index); - onChanged(); - } else { - diBuilder_.remove(index); - } - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getDiBuilder( - int index) { - return getDiFieldBuilder().getBuilder(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDiOrBuilder( - int index) { - if (diBuilder_ == null) { - return di_.get(index); } else { - return diBuilder_.getMessageOrBuilder(index); - } - } - public java.util.List - getDiOrBuilderList() { - if (diBuilder_ != null) { - return diBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(di_); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addDiBuilder() { - return getDiFieldBuilder().addBuilder( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addDiBuilder( - int index) { - return getDiFieldBuilder().addBuilder( - index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); - } - public java.util.List - getDiBuilderList() { - return getDiFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> - getDiFieldBuilder() { - if (diBuilder_ == null) { - diBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>( - di_, - ((bitField0_ & 0x00000001) == 0x00000001), - getParentForChildren(), - isClean()); - di_ = null; - } - return diBuilder_; - } - - // @@protoc_insertion_point(builder_scope:GetDatanodeReportResponseProto) - } - - static { - defaultInstance = new GetDatanodeReportResponseProto(true); - defaultInstance.initFields(); - 
} - - // @@protoc_insertion_point(class_scope:GetDatanodeReportResponseProto) - } - - public interface GetPreferredBlockSizeRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string filename = 1; - boolean hasFilename(); - String getFilename(); - } - public static final class GetPreferredBlockSizeRequestProto extends - com.google.protobuf.GeneratedMessage - implements GetPreferredBlockSizeRequestProtoOrBuilder { - // Use GetPreferredBlockSizeRequestProto.newBuilder() to construct. - private GetPreferredBlockSizeRequestProto(Builder builder) { - super(builder); - } - private GetPreferredBlockSizeRequestProto(boolean noInit) {} - - private static final GetPreferredBlockSizeRequestProto defaultInstance; - public static GetPreferredBlockSizeRequestProto getDefaultInstance() { - return defaultInstance; - } - - public GetPreferredBlockSizeRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetPreferredBlockSizeRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetPreferredBlockSizeRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string filename = 1; - public static final int FILENAME_FIELD_NUMBER = 1; - private java.lang.Object filename_; - public boolean hasFilename() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getFilename() { - java.lang.Object ref = filename_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - filename_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getFilenameBytes() { - java.lang.Object ref = filename_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - filename_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - filename_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasFilename()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getFilenameBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getFilenameBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return 
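Stepping back to the datanode-report response that closed above: it wraps one repeated DatanodeInfoProto field named di, so the usual generated repeated-field accessors (getDiCount, getDi, getDiList) apply. A consumption sketch; DatanodeInfoProto's own fields live in HdfsProtos and are not visible in this hunk, so the helper only counts entries:

    import java.util.List;
    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;

    public class DatanodeReportResponse {
      // Typically resp would come from one of the generated parseFrom overloads.
      static int countDatanodes(GetDatanodeReportResponseProto resp) {
        List<DatanodeInfoProto> datanodes = resp.getDiList();
        return datanodes.size();  // same value as resp.getDiCount()
      }
    }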
super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto) obj; - - boolean result = true; - result = result && (hasFilename() == other.hasFilename()); - if (hasFilename()) { - result = result && getFilename() - .equals(other.getFilename()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasFilename()) { - hash = (37 * hash) + FILENAME_FIELD_NUMBER; - hash = (53 * hash) + getFilename().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetPreferredBlockSizeRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetPreferredBlockSizeRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - filename_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw 
newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.filename_ = filename_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto.getDefaultInstance()) return this; - if (other.hasFilename()) { - setFilename(other.getFilename()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasFilename()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - filename_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required string filename = 1; - private java.lang.Object filename_ = ""; - public boolean hasFilename() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getFilename() { - java.lang.Object ref = filename_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - filename_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setFilename(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - filename_ = value; - onChanged(); - return this; - } - public Builder clearFilename() { - bitField0_ = (bitField0_ & ~0x00000001); - filename_ = 
getDefaultInstance().getFilename(); - onChanged(); - return this; - } - void setFilename(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - filename_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:GetPreferredBlockSizeRequestProto) - } - - static { - defaultInstance = new GetPreferredBlockSizeRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetPreferredBlockSizeRequestProto) - } - - public interface GetPreferredBlockSizeResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required uint64 bsize = 1; - boolean hasBsize(); - long getBsize(); - } - public static final class GetPreferredBlockSizeResponseProto extends - com.google.protobuf.GeneratedMessage - implements GetPreferredBlockSizeResponseProtoOrBuilder { - // Use GetPreferredBlockSizeResponseProto.newBuilder() to construct. - private GetPreferredBlockSizeResponseProto(Builder builder) { - super(builder); - } - private GetPreferredBlockSizeResponseProto(boolean noInit) {} - - private static final GetPreferredBlockSizeResponseProto defaultInstance; - public static GetPreferredBlockSizeResponseProto getDefaultInstance() { - return defaultInstance; - } - - public GetPreferredBlockSizeResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetPreferredBlockSizeResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetPreferredBlockSizeResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required uint64 bsize = 1; - public static final int BSIZE_FIELD_NUMBER = 1; - private long bsize_; - public boolean hasBsize() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getBsize() { - return bsize_; - } - - private void initFields() { - bsize_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBsize()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt64(1, bsize_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(1, bsize_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto)) { 
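GetPreferredBlockSizeRequestProto carries only the required filename string. Since this hunk also generates parseDelimitedFrom overloads, here is a sketch of the length-prefixed framing; the path is hypothetical, and writeDelimitedTo is the standard protobuf-java counterpart rather than something declared in this file:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto;

    public class PreferredBlockSizeFraming {
      public static void main(String[] args) throws IOException {
        GetPreferredBlockSizeRequestProto req =
            GetPreferredBlockSizeRequestProto.newBuilder()
                .setFilename("/user/example/data.txt")  // hypothetical HDFS path
                .build();
        // Length-prefix the message, then read it back with the generated
        // parseDelimitedFrom(InputStream) overload shown above.
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        req.writeDelimitedTo(out);
        GetPreferredBlockSizeRequestProto copy =
            GetPreferredBlockSizeRequestProto.parseDelimitedFrom(
                new ByteArrayInputStream(out.toByteArray()));
        assert copy.getFilename().equals(req.getFilename());
      }
    }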
- return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto) obj; - - boolean result = true; - result = result && (hasBsize() == other.hasBsize()); - if (hasBsize()) { - result = result && (getBsize() - == other.getBsize()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBsize()) { - hash = (37 * hash) + BSIZE_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getBsize()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto 
parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetPreferredBlockSizeResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetPreferredBlockSizeResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - bsize_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.bsize_ = bsize_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.getDefaultInstance()) return this; - if (other.hasBsize()) { - setBsize(other.getBsize()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBsize()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - bsize_ = input.readUInt64(); - break; - } - } - } - } - - private int bitField0_; - - // required uint64 bsize = 1; - private long bsize_ ; - public boolean hasBsize() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getBsize() { - return bsize_; - } - public Builder setBsize(long value) { - bitField0_ |= 0x00000001; - bsize_ = value; - onChanged(); - return this; - } - public Builder clearBsize() { - bitField0_ = (bitField0_ & ~0x00000001); - bsize_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:GetPreferredBlockSizeResponseProto) - } - - static { - defaultInstance = new GetPreferredBlockSizeResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetPreferredBlockSizeResponseProto) - } - - public interface SetSafeModeRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .SafeModeAction action = 1; - boolean hasAction(); - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeAction getAction(); - } - 
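For context while reading the deleted generated code above and below: each message follows the stock protobuf-java 2.4 pattern of an immutable message class plus a nested Builder, with required-field enforcement in build()/isInitialized() and static parseFrom overloads for deserialization. A minimal usage sketch for GetPreferredBlockSizeResponseProto, round-tripping through the parseFrom(byte[]) overload shown above — the class name BsizeRoundTrip and the 128 MB value are illustrative only, not part of the patch:

    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto;

    public class BsizeRoundTrip {
      public static void main(String[] args) throws Exception {
        // bsize is a required uint64, so build() would throw an
        // UninitializedMessageException if it were left unset.
        GetPreferredBlockSizeResponseProto resp =
            GetPreferredBlockSizeResponseProto.newBuilder()
                .setBsize(128L * 1024 * 1024) // illustrative preferred block size
                .build();

        // Serialize, then parse back via the generated parseFrom(byte[]) overload.
        byte[] wire = resp.toByteArray();
        GetPreferredBlockSizeResponseProto parsed =
            GetPreferredBlockSizeResponseProto.parseFrom(wire);

        // Round trip preserves the field (run with java -ea to check).
        assert parsed.hasBsize() && parsed.getBsize() == resp.getBsize();
      }
    }
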
public static final class SetSafeModeRequestProto extends - com.google.protobuf.GeneratedMessage - implements SetSafeModeRequestProtoOrBuilder { - // Use SetSafeModeRequestProto.newBuilder() to construct. - private SetSafeModeRequestProto(Builder builder) { - super(builder); - } - private SetSafeModeRequestProto(boolean noInit) {} - - private static final SetSafeModeRequestProto defaultInstance; - public static SetSafeModeRequestProto getDefaultInstance() { - return defaultInstance; - } - - public SetSafeModeRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetSafeModeRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetSafeModeRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required .SafeModeAction action = 1; - public static final int ACTION_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeAction action_; - public boolean hasAction() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeAction getAction() { - return action_; - } - - private void initFields() { - action_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeAction.SAFEMODE_LEAVE; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasAction()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(1, action_.getNumber()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, action_.getNumber()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto) obj; - - boolean result = true; - result = result && (hasAction() == other.hasAction()); - if (hasAction()) { - result = result && - (getAction() == other.getAction()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - 
@java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasAction()) { - hash = (37 * hash) + ACTION_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getAction()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - 
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetSafeModeRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetSafeModeRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - action_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeAction.SAFEMODE_LEAVE; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 
0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.action_ = action_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto.getDefaultInstance()) return this; - if (other.hasAction()) { - setAction(other.getAction()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasAction()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - int rawValue = input.readEnum(); - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeAction value = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeAction.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - bitField0_ |= 0x00000001; - action_ = value; - } - break; - } - } - } - } - - private int bitField0_; - - // required .SafeModeAction action = 1; - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeAction action_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeAction.SAFEMODE_LEAVE; - public boolean hasAction() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeAction getAction() { - return action_; - } - public Builder setAction(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeAction value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - action_ = value; - onChanged(); - return this; - } - public Builder clearAction() { - bitField0_ = (bitField0_ & ~0x00000001); - action_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeAction.SAFEMODE_LEAVE; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:SetSafeModeRequestProto) - } - - static { - defaultInstance = new SetSafeModeRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:SetSafeModeRequestProto) - } - - public interface SetSafeModeResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required bool result = 1; - boolean hasResult(); - boolean getResult(); - } - public static final class SetSafeModeResponseProto extends - 
com.google.protobuf.GeneratedMessage - implements SetSafeModeResponseProtoOrBuilder { - // Use SetSafeModeResponseProto.newBuilder() to construct. - private SetSafeModeResponseProto(Builder builder) { - super(builder); - } - private SetSafeModeResponseProto(boolean noInit) {} - - private static final SetSafeModeResponseProto defaultInstance; - public static SetSafeModeResponseProto getDefaultInstance() { - return defaultInstance; - } - - public SetSafeModeResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetSafeModeResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetSafeModeResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required bool result = 1; - public static final int RESULT_FIELD_NUMBER = 1; - private boolean result_; - public boolean hasResult() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public boolean getResult() { - return result_; - } - - private void initFields() { - result_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasResult()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(1, result_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(1, result_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto) obj; - - boolean result = true; - result = result && (hasResult() == other.hasResult()); - if (hasResult()) { - result = result && (getResult() - == other.getResult()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasResult()) { - hash = (37 * hash) + RESULT_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getResult()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetSafeModeResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetSafeModeResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - result_ = false; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.result_ = result_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto) { - return 
mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.getDefaultInstance()) return this; - if (other.hasResult()) { - setResult(other.getResult()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasResult()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - result_ = input.readBool(); - break; - } - } - } - } - - private int bitField0_; - - // required bool result = 1; - private boolean result_ ; - public boolean hasResult() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public boolean getResult() { - return result_; - } - public Builder setResult(boolean value) { - bitField0_ |= 0x00000001; - result_ = value; - onChanged(); - return this; - } - public Builder clearResult() { - bitField0_ = (bitField0_ & ~0x00000001); - result_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:SetSafeModeResponseProto) - } - - static { - defaultInstance = new SetSafeModeResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:SetSafeModeResponseProto) - } - - public interface SaveNamespaceRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class SaveNamespaceRequestProto extends - com.google.protobuf.GeneratedMessage - implements SaveNamespaceRequestProtoOrBuilder { - // Use SaveNamespaceRequestProto.newBuilder() to construct. 
- private SaveNamespaceRequestProto(Builder builder) { - super(builder); - } - private SaveNamespaceRequestProto(boolean noInit) {} - - private static final SaveNamespaceRequestProto defaultInstance; - public static SaveNamespaceRequestProto getDefaultInstance() { - return defaultInstance; - } - - public SaveNamespaceRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SaveNamespaceRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SaveNamespaceRequestProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto 
parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SaveNamespaceRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SaveNamespaceRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); 
- } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // 
@@protoc_insertion_point(builder_scope:SaveNamespaceRequestProto) - } - - static { - defaultInstance = new SaveNamespaceRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:SaveNamespaceRequestProto) - } - - public interface SaveNamespaceResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class SaveNamespaceResponseProto extends - com.google.protobuf.GeneratedMessage - implements SaveNamespaceResponseProtoOrBuilder { - // Use SaveNamespaceResponseProto.newBuilder() to construct. - private SaveNamespaceResponseProto(Builder builder) { - super(builder); - } - private SaveNamespaceResponseProto(boolean noInit) {} - - private static final SaveNamespaceResponseProto defaultInstance; - public static SaveNamespaceResponseProto getDefaultInstance() { - return defaultInstance; - } - - public SaveNamespaceResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SaveNamespaceResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SaveNamespaceResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto parseFrom( - 
com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProtoOrBuilder { - public static final 
com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SaveNamespaceResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SaveNamespaceResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream 
input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:SaveNamespaceResponseProto) - } - - static { - defaultInstance = new SaveNamespaceResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:SaveNamespaceResponseProto) - } - - public interface RestoreFailedStorageRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string arg = 1; - boolean hasArg(); - String getArg(); - } - public static final class RestoreFailedStorageRequestProto extends - com.google.protobuf.GeneratedMessage - implements RestoreFailedStorageRequestProtoOrBuilder { - // Use RestoreFailedStorageRequestProto.newBuilder() to construct. - private RestoreFailedStorageRequestProto(Builder builder) { - super(builder); - } - private RestoreFailedStorageRequestProto(boolean noInit) {} - - private static final RestoreFailedStorageRequestProto defaultInstance; - public static RestoreFailedStorageRequestProto getDefaultInstance() { - return defaultInstance; - } - - public RestoreFailedStorageRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RestoreFailedStorageRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RestoreFailedStorageRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string arg = 1; - public static final int ARG_FIELD_NUMBER = 1; - private java.lang.Object arg_; - public boolean hasArg() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getArg() { - java.lang.Object ref = arg_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - arg_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getArgBytes() { - java.lang.Object ref = arg_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - arg_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - arg_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasArg()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 
0x00000001)) { - output.writeBytes(1, getArgBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getArgBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto) obj; - - boolean result = true; - result = result && (hasArg() == other.hasArg()); - if (hasArg()) { - result = result && getArg() - .equals(other.getArg()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasArg()) { - hash = (37 * hash) + ARG_FIELD_NUMBER; - hash = (53 * hash) + getArg().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, 
extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RestoreFailedStorageRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RestoreFailedStorageRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - arg_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.arg_ = arg_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto.getDefaultInstance()) return this; - if (other.hasArg()) { - setArg(other.getArg()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasArg()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - arg_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required string arg = 1; - private java.lang.Object arg_ = ""; - public 
boolean hasArg() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getArg() { - java.lang.Object ref = arg_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - arg_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setArg(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - arg_ = value; - onChanged(); - return this; - } - public Builder clearArg() { - bitField0_ = (bitField0_ & ~0x00000001); - arg_ = getDefaultInstance().getArg(); - onChanged(); - return this; - } - void setArg(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - arg_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:RestoreFailedStorageRequestProto) - } - - static { - defaultInstance = new RestoreFailedStorageRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:RestoreFailedStorageRequestProto) - } - - public interface RestoreFailedStorageResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required bool result = 1; - boolean hasResult(); - boolean getResult(); - } - public static final class RestoreFailedStorageResponseProto extends - com.google.protobuf.GeneratedMessage - implements RestoreFailedStorageResponseProtoOrBuilder { - // Use RestoreFailedStorageResponseProto.newBuilder() to construct. - private RestoreFailedStorageResponseProto(Builder builder) { - super(builder); - } - private RestoreFailedStorageResponseProto(boolean noInit) {} - - private static final RestoreFailedStorageResponseProto defaultInstance; - public static RestoreFailedStorageResponseProto getDefaultInstance() { - return defaultInstance; - } - - public RestoreFailedStorageResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RestoreFailedStorageResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RestoreFailedStorageResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required bool result = 1; - public static final int RESULT_FIELD_NUMBER = 1; - private boolean result_; - public boolean hasResult() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public boolean getResult() { - return result_; - } - - private void initFields() { - result_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasResult()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(1, result_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(1, 
result_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto) obj; - - boolean result = true; - result = result && (hasResult() == other.hasResult()); - if (hasResult()) { - result = result && (getResult() - == other.getResult()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasResult()) { - hash = (37 * hash) + RESULT_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getResult()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return 
null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RestoreFailedStorageResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RestoreFailedStorageResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - result_ = false; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.getDefaultInstance(); - } - - public 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.result_ = result_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.getDefaultInstance()) return this; - if (other.hasResult()) { - setResult(other.getResult()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasResult()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - result_ = input.readBool(); - break; - } - } - } - } - - private int bitField0_; - - // required bool result = 1; - private boolean result_ ; - public boolean hasResult() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public boolean getResult() { - return result_; - } - public Builder setResult(boolean value) { - bitField0_ |= 0x00000001; - result_ = value; - onChanged(); - return this; - } - public Builder clearResult() { - bitField0_ = (bitField0_ & ~0x00000001); - result_ = false; - onChanged(); - return this; - } - - 
// @@protoc_insertion_point(builder_scope:RestoreFailedStorageResponseProto) - } - - static { - defaultInstance = new RestoreFailedStorageResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:RestoreFailedStorageResponseProto) - } - - public interface RefreshNodesRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class RefreshNodesRequestProto extends - com.google.protobuf.GeneratedMessage - implements RefreshNodesRequestProtoOrBuilder { - // Use RefreshNodesRequestProto.newBuilder() to construct. - private RefreshNodesRequestProto(Builder builder) { - super(builder); - } - private RefreshNodesRequestProto(boolean noInit) {} - - private static final RefreshNodesRequestProto defaultInstance; - public static RefreshNodesRequestProto getDefaultInstance() { - return defaultInstance; - } - - public RefreshNodesRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RefreshNodesRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RefreshNodesRequestProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto parseFrom( - 
com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProtoOrBuilder { - public static final 
com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RefreshNodesRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RefreshNodesRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:RefreshNodesRequestProto) - } - - static { - defaultInstance = new RefreshNodesRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:RefreshNodesRequestProto) - } - - public interface RefreshNodesResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class RefreshNodesResponseProto extends - com.google.protobuf.GeneratedMessage - implements RefreshNodesResponseProtoOrBuilder { - // Use RefreshNodesResponseProto.newBuilder() to construct. - private RefreshNodesResponseProto(Builder builder) { - super(builder); - } - private RefreshNodesResponseProto(boolean noInit) {} - - private static final RefreshNodesResponseProto defaultInstance; - public static RefreshNodesResponseProto getDefaultInstance() { - return defaultInstance; - } - - public RefreshNodesResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RefreshNodesResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RefreshNodesResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int 
hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto 
prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RefreshNodesResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RefreshNodesResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto)other); - } else { - 
super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:RefreshNodesResponseProto) - } - - static { - defaultInstance = new RefreshNodesResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:RefreshNodesResponseProto) - } - - public interface FinalizeUpgradeRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class FinalizeUpgradeRequestProto extends - com.google.protobuf.GeneratedMessage - implements FinalizeUpgradeRequestProtoOrBuilder { - // Use FinalizeUpgradeRequestProto.newBuilder() to construct. - private FinalizeUpgradeRequestProto(Builder builder) { - super(builder); - } - private FinalizeUpgradeRequestProto(boolean noInit) {} - - private static final FinalizeUpgradeRequestProto defaultInstance; - public static FinalizeUpgradeRequestProto getDefaultInstance() { - return defaultInstance; - } - - public FinalizeUpgradeRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_FinalizeUpgradeRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_FinalizeUpgradeRequestProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if 
(obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_FinalizeUpgradeRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_FinalizeUpgradeRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:FinalizeUpgradeRequestProto) - } - - static { - defaultInstance = new FinalizeUpgradeRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:FinalizeUpgradeRequestProto) - } - - public interface FinalizeUpgradeResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class FinalizeUpgradeResponseProto extends - com.google.protobuf.GeneratedMessage - implements FinalizeUpgradeResponseProtoOrBuilder { - // Use FinalizeUpgradeResponseProto.newBuilder() to construct. 
- private FinalizeUpgradeResponseProto(Builder builder) { - super(builder); - } - private FinalizeUpgradeResponseProto(boolean noInit) {} - - private static final FinalizeUpgradeResponseProto defaultInstance; - public static FinalizeUpgradeResponseProto getDefaultInstance() { - return defaultInstance; - } - - public FinalizeUpgradeResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_FinalizeUpgradeResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_FinalizeUpgradeResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_FinalizeUpgradeResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_FinalizeUpgradeResponseProto_fieldAccessorTable; - } - - // Construct using 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, 
unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:FinalizeUpgradeResponseProto) - } - - static { - defaultInstance = new FinalizeUpgradeResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:FinalizeUpgradeResponseProto) - } - - public interface DistributedUpgradeProgressRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .UpgradeAction action = 1; - boolean hasAction(); - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeAction getAction(); - } - public static final class DistributedUpgradeProgressRequestProto extends - com.google.protobuf.GeneratedMessage - implements DistributedUpgradeProgressRequestProtoOrBuilder { - // Use DistributedUpgradeProgressRequestProto.newBuilder() to construct. - private DistributedUpgradeProgressRequestProto(Builder builder) { - super(builder); - } - private DistributedUpgradeProgressRequestProto(boolean noInit) {} - - private static final DistributedUpgradeProgressRequestProto defaultInstance; - public static DistributedUpgradeProgressRequestProto getDefaultInstance() { - return defaultInstance; - } - - public DistributedUpgradeProgressRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_DistributedUpgradeProgressRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_DistributedUpgradeProgressRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required .UpgradeAction action = 1; - public static final int ACTION_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeAction action_; - public boolean hasAction() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeAction getAction() { - return action_; - } - - private void initFields() { - action_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeAction.GET_STATUS; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasAction()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(1, action_.getNumber()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, action_.getNumber()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected 
java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto) obj; - - boolean result = true; - result = result && (hasAction() == other.hasAction()); - if (hasAction()) { - result = result && - (getAction() == other.getAction()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasAction()) { - hash = (37 * hash) + ACTION_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getAction()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto parseDelimitedFrom( - 
java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_DistributedUpgradeProgressRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_DistributedUpgradeProgressRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - action_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeAction.GET_STATUS; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto.getDefaultInstance(); - } - - public 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.action_ = action_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto.getDefaultInstance()) return this; - if (other.hasAction()) { - setAction(other.getAction()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasAction()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - int rawValue = input.readEnum(); - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeAction value = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeAction.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - bitField0_ |= 0x00000001; - action_ = value; - } - break; - } - } - } - } - - private int bitField0_; - - // required .UpgradeAction action = 1; - private 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeAction action_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeAction.GET_STATUS; - public boolean hasAction() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeAction getAction() { - return action_; - } - public Builder setAction(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeAction value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - action_ = value; - onChanged(); - return this; - } - public Builder clearAction() { - bitField0_ = (bitField0_ & ~0x00000001); - action_ = org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeAction.GET_STATUS; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:DistributedUpgradeProgressRequestProto) - } - - static { - defaultInstance = new DistributedUpgradeProgressRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:DistributedUpgradeProgressRequestProto) - } - - public interface DistributedUpgradeProgressResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .UpgradeStatusReportProto report = 1; - boolean hasReport(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto getReport(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProtoOrBuilder getReportOrBuilder(); - } - public static final class DistributedUpgradeProgressResponseProto extends - com.google.protobuf.GeneratedMessage - implements DistributedUpgradeProgressResponseProtoOrBuilder { - // Use DistributedUpgradeProgressResponseProto.newBuilder() to construct. 
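/*
 * Aside (sketch): DistributedUpgradeProgressRequestProto above shows the
 * required-enum pattern of this generation: bit 0x00000001 of bitField0_ records
 * presence, build() rejects a message whose required field is unset, and an
 * unrecognized wire value is preserved in unknownFields by the case-8 branch of
 * mergeFrom() instead of being dropped. Assumed usage, with names taken from the
 * generated code:
 *
 *   DistributedUpgradeProgressRequestProto req =
 *       DistributedUpgradeProgressRequestProto.newBuilder()
 *           .setAction(UpgradeAction.GET_STATUS)  // required .UpgradeAction action = 1
 *           .build();                             // throws if action is unset
 */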
- private DistributedUpgradeProgressResponseProto(Builder builder) { - super(builder); - } - private DistributedUpgradeProgressResponseProto(boolean noInit) {} - - private static final DistributedUpgradeProgressResponseProto defaultInstance; - public static DistributedUpgradeProgressResponseProto getDefaultInstance() { - return defaultInstance; - } - - public DistributedUpgradeProgressResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_DistributedUpgradeProgressResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_DistributedUpgradeProgressResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required .UpgradeStatusReportProto report = 1; - public static final int REPORT_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto report_; - public boolean hasReport() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto getReport() { - return report_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProtoOrBuilder getReportOrBuilder() { - return report_; - } - - private void initFields() { - report_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasReport()) { - memoizedIsInitialized = 0; - return false; - } - if (!getReport().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, report_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, report_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto) obj; - - boolean result = true; - result = result && (hasReport() == other.hasReport()); - if (hasReport()) { - result = result 
&& getReport() - .equals(other.getReport()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasReport()) { - hash = (37 * hash) + REPORT_FIELD_NUMBER; - hash = (53 * hash) + getReport().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, 
- com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_DistributedUpgradeProgressResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_DistributedUpgradeProgressResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getReportFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (reportBuilder_ == null) { - report_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.getDefaultInstance(); - } else { - reportBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto result 
= buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (reportBuilder_ == null) { - result.report_ = report_; - } else { - result.report_ = reportBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto.getDefaultInstance()) return this; - if (other.hasReport()) { - mergeReport(other.getReport()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasReport()) { - - return false; - } - if (!getReport().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.newBuilder(); - if (hasReport()) { - subBuilder.mergeFrom(getReport()); - } - input.readMessage(subBuilder, extensionRegistry); - setReport(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .UpgradeStatusReportProto report = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto report_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProtoOrBuilder> reportBuilder_; - public boolean hasReport() { - return ((bitField0_ & 0x00000001) == 
0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto getReport() { - if (reportBuilder_ == null) { - return report_; - } else { - return reportBuilder_.getMessage(); - } - } - public Builder setReport(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto value) { - if (reportBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - report_ = value; - onChanged(); - } else { - reportBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setReport( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.Builder builderForValue) { - if (reportBuilder_ == null) { - report_ = builderForValue.build(); - onChanged(); - } else { - reportBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeReport(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto value) { - if (reportBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - report_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.getDefaultInstance()) { - report_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.newBuilder(report_).mergeFrom(value).buildPartial(); - } else { - report_ = value; - } - onChanged(); - } else { - reportBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearReport() { - if (reportBuilder_ == null) { - report_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.getDefaultInstance(); - onChanged(); - } else { - reportBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.Builder getReportBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getReportFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProtoOrBuilder getReportOrBuilder() { - if (reportBuilder_ != null) { - return reportBuilder_.getMessageOrBuilder(); - } else { - return report_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProtoOrBuilder> - getReportFieldBuilder() { - if (reportBuilder_ == null) { - reportBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProtoOrBuilder>( - report_, - getParentForChildren(), - isClean()); - report_ = null; - } - return reportBuilder_; - } - - // @@protoc_insertion_point(builder_scope:DistributedUpgradeProgressResponseProto) - } - - static { - defaultInstance = new DistributedUpgradeProgressResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:DistributedUpgradeProgressResponseProto) - } - - public interface ListCorruptFileBlocksRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string path = 1; - boolean hasPath(); - String getPath(); - - // required string cookie = 2; - boolean hasCookie(); - String getCookie(); - } - public 
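/*
 * Aside (sketch): the response class just closed illustrates the nested-message
 * pattern: report_ is held as a plain message until getReportBuilder() is first
 * called, after which a SingleFieldBuilder owns it and keeps parent and child
 * builders in sync; getReport(), setReport() and mergeReport() all branch on
 * whichever representation is live. Hypothetical caller (UpgradeStatusReportProto's
 * own fields are defined in hdfs.proto, not shown here):
 *
 *   DistributedUpgradeProgressResponseProto.Builder rb =
 *       DistributedUpgradeProgressResponseProto.newBuilder();
 *   rb.getReportBuilder();            // flips report onto the builder path
 *   boolean ok = rb.isInitialized();  // likely false: the generated
 *       // getReport().isInitialized() check implies the report itself carries
 *       // required fields that are still unset
 */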
static final class ListCorruptFileBlocksRequestProto extends - com.google.protobuf.GeneratedMessage - implements ListCorruptFileBlocksRequestProtoOrBuilder { - // Use ListCorruptFileBlocksRequestProto.newBuilder() to construct. - private ListCorruptFileBlocksRequestProto(Builder builder) { - super(builder); - } - private ListCorruptFileBlocksRequestProto(boolean noInit) {} - - private static final ListCorruptFileBlocksRequestProto defaultInstance; - public static ListCorruptFileBlocksRequestProto getDefaultInstance() { - return defaultInstance; - } - - public ListCorruptFileBlocksRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_ListCorruptFileBlocksRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_ListCorruptFileBlocksRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string path = 1; - public static final int PATH_FIELD_NUMBER = 1; - private java.lang.Object path_; - public boolean hasPath() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getPath() { - java.lang.Object ref = path_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - path_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getPathBytes() { - java.lang.Object ref = path_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - path_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string cookie = 2; - public static final int COOKIE_FIELD_NUMBER = 2; - private java.lang.Object cookie_; - public boolean hasCookie() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getCookie() { - java.lang.Object ref = cookie_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - cookie_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getCookieBytes() { - java.lang.Object ref = cookie_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - cookie_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - path_ = ""; - cookie_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasPath()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasCookie()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getPathBytes()); - } - if (((bitField0_ & 
0x00000002) == 0x00000002)) { - output.writeBytes(2, getCookieBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getPathBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getCookieBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto) obj; - - boolean result = true; - result = result && (hasPath() == other.hasPath()); - if (hasPath()) { - result = result && getPath() - .equals(other.getPath()); - } - result = result && (hasCookie() == other.hasCookie()); - if (hasCookie()) { - result = result && getCookie() - .equals(other.getCookie()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasPath()) { - hash = (37 * hash) + PATH_FIELD_NUMBER; - hash = (53 * hash) + getPath().hashCode(); - } - if (hasCookie()) { - hash = (37 * hash) + COOKIE_FIELD_NUMBER; - hash = (53 * hash) + getCookie().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_ListCorruptFileBlocksRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_ListCorruptFileBlocksRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { 
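/*
 * Aside (sketch): presence of the two required strings is packed into bitField0_,
 * one bit per field (0x00000001 for path, 0x00000002 for cookie); hasPath(),
 * hasCookie() and isInitialized() all test those bits. Illustrative check:
 *
 *   ListCorruptFileBlocksRequestProto.Builder b =
 *       ListCorruptFileBlocksRequestProto.newBuilder().setPath("/");
 *   b.isInitialized();   // false: cookie bit (0x00000002) still clear
 *   b.setCookie("");     // an empty cookie appears to mean "start of listing"
 *   b.isInitialized();   // true: both required bits set
 */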
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - path_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - cookie_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.path_ = path_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.cookie_ = cookie_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto.getDefaultInstance()) return this; - if (other.hasPath()) { - setPath(other.getPath()); - } - if (other.hasCookie()) { - setCookie(other.getCookie()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasPath()) { - - return false; - } - if (!hasCookie()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - path_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - cookie_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required string path = 1; - private java.lang.Object path_ = ""; - public boolean hasPath() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getPath() { - java.lang.Object ref = path_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - path_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setPath(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - path_ = value; - onChanged(); - return this; - } - public Builder clearPath() { - bitField0_ = (bitField0_ & ~0x00000001); - path_ = getDefaultInstance().getPath(); - onChanged(); - return this; - } - void setPath(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - path_ = value; - onChanged(); - } - - // required string cookie = 2; - private java.lang.Object cookie_ = ""; - public boolean hasCookie() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getCookie() { - java.lang.Object ref = cookie_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - cookie_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setCookie(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - cookie_ = value; - onChanged(); - return this; - } - public Builder clearCookie() { - bitField0_ = (bitField0_ & ~0x00000002); - cookie_ = getDefaultInstance().getCookie(); - onChanged(); - return this; - } - void setCookie(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000002; - cookie_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:ListCorruptFileBlocksRequestProto) - } - - static { - defaultInstance = new ListCorruptFileBlocksRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ListCorruptFileBlocksRequestProto) - } - - public interface ListCorruptFileBlocksResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .CorruptFileBlocksProto corrupt = 1; - boolean hasCorrupt(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto getCorrupt(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProtoOrBuilder getCorruptOrBuilder(); - } - public static final class ListCorruptFileBlocksResponseProto extends - com.google.protobuf.GeneratedMessage - implements ListCorruptFileBlocksResponseProtoOrBuilder { - // Use ListCorruptFileBlocksResponseProto.newBuilder() to construct. 
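/*
 * Aside (sketch): reconstructed from the field comments in the generated code,
 * the .proto declarations behind this request/response pair (presumably in
 * ClientNamenodeProtocol.proto) would read roughly:
 *
 *   message ListCorruptFileBlocksRequestProto {
 *     required string path   = 1;
 *     required string cookie = 2;
 *   }
 *   message ListCorruptFileBlocksResponseProto {
 *     required CorruptFileBlocksProto corrupt = 1;  // message type from hdfs.proto
 *   }
 */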
- private ListCorruptFileBlocksResponseProto(Builder builder) { - super(builder); - } - private ListCorruptFileBlocksResponseProto(boolean noInit) {} - - private static final ListCorruptFileBlocksResponseProto defaultInstance; - public static ListCorruptFileBlocksResponseProto getDefaultInstance() { - return defaultInstance; - } - - public ListCorruptFileBlocksResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_ListCorruptFileBlocksResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_ListCorruptFileBlocksResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required .CorruptFileBlocksProto corrupt = 1; - public static final int CORRUPT_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto corrupt_; - public boolean hasCorrupt() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto getCorrupt() { - return corrupt_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProtoOrBuilder getCorruptOrBuilder() { - return corrupt_; - } - - private void initFields() { - corrupt_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasCorrupt()) { - memoizedIsInitialized = 0; - return false; - } - if (!getCorrupt().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, corrupt_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, corrupt_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto) obj; - - boolean result = true; - result = result && (hasCorrupt() == other.hasCorrupt()); - if (hasCorrupt()) { - result = result && getCorrupt() - 
.equals(other.getCorrupt()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasCorrupt()) { - hash = (37 * hash) + CORRUPT_FIELD_NUMBER; - hash = (53 * hash) + getCorrupt().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) 
- throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_ListCorruptFileBlocksResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_ListCorruptFileBlocksResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getCorruptFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (corruptBuilder_ == null) { - corrupt_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance(); - } else { - corruptBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - 
result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (corruptBuilder_ == null) { - result.corrupt_ = corrupt_; - } else { - result.corrupt_ = corruptBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.getDefaultInstance()) return this; - if (other.hasCorrupt()) { - mergeCorrupt(other.getCorrupt()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasCorrupt()) { - - return false; - } - if (!getCorrupt().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.newBuilder(); - if (hasCorrupt()) { - subBuilder.mergeFrom(getCorrupt()); - } - input.readMessage(subBuilder, extensionRegistry); - setCorrupt(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .CorruptFileBlocksProto corrupt = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto corrupt_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProtoOrBuilder> corruptBuilder_; - public boolean hasCorrupt() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto getCorrupt() { - if (corruptBuilder_ 
== null) { - return corrupt_; - } else { - return corruptBuilder_.getMessage(); - } - } - public Builder setCorrupt(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto value) { - if (corruptBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - corrupt_ = value; - onChanged(); - } else { - corruptBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setCorrupt( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder builderForValue) { - if (corruptBuilder_ == null) { - corrupt_ = builderForValue.build(); - onChanged(); - } else { - corruptBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeCorrupt(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto value) { - if (corruptBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - corrupt_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance()) { - corrupt_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.newBuilder(corrupt_).mergeFrom(value).buildPartial(); - } else { - corrupt_ = value; - } - onChanged(); - } else { - corruptBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearCorrupt() { - if (corruptBuilder_ == null) { - corrupt_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance(); - onChanged(); - } else { - corruptBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder getCorruptBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getCorruptFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProtoOrBuilder getCorruptOrBuilder() { - if (corruptBuilder_ != null) { - return corruptBuilder_.getMessageOrBuilder(); - } else { - return corrupt_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProtoOrBuilder> - getCorruptFieldBuilder() { - if (corruptBuilder_ == null) { - corruptBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProtoOrBuilder>( - corrupt_, - getParentForChildren(), - isClean()); - corrupt_ = null; - } - return corruptBuilder_; - } - - // @@protoc_insertion_point(builder_scope:ListCorruptFileBlocksResponseProto) - } - - static { - defaultInstance = new ListCorruptFileBlocksResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ListCorruptFileBlocksResponseProto) - } - - public interface MetaSaveRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string filename = 1; - boolean hasFilename(); - String getFilename(); - } - public static final class MetaSaveRequestProto extends - com.google.protobuf.GeneratedMessage - implements MetaSaveRequestProtoOrBuilder { - // Use MetaSaveRequestProto.newBuilder() to construct. 
- private MetaSaveRequestProto(Builder builder) { - super(builder); - } - private MetaSaveRequestProto(boolean noInit) {} - - private static final MetaSaveRequestProto defaultInstance; - public static MetaSaveRequestProto getDefaultInstance() { - return defaultInstance; - } - - public MetaSaveRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_MetaSaveRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_MetaSaveRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string filename = 1; - public static final int FILENAME_FIELD_NUMBER = 1; - private java.lang.Object filename_; - public boolean hasFilename() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getFilename() { - java.lang.Object ref = filename_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - filename_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getFilenameBytes() { - java.lang.Object ref = filename_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - filename_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - filename_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasFilename()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getFilenameBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getFilenameBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto) obj; - - boolean result = true; - result = result && (hasFilename() == other.hasFilename()); - if (hasFilename()) { - result = result && 
getFilename() - .equals(other.getFilename()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasFilename()) { - hash = (37 * hash) + FILENAME_FIELD_NUMBER; - hash = (53 * hash) + getFilename().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - 
public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_MetaSaveRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_MetaSaveRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - filename_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - 
to_bitField0_ |= 0x00000001; - } - result.filename_ = filename_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto.getDefaultInstance()) return this; - if (other.hasFilename()) { - setFilename(other.getFilename()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasFilename()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - filename_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required string filename = 1; - private java.lang.Object filename_ = ""; - public boolean hasFilename() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getFilename() { - java.lang.Object ref = filename_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - filename_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setFilename(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - filename_ = value; - onChanged(); - return this; - } - public Builder clearFilename() { - bitField0_ = (bitField0_ & ~0x00000001); - filename_ = getDefaultInstance().getFilename(); - onChanged(); - return this; - } - void setFilename(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - filename_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:MetaSaveRequestProto) - } - - static { - defaultInstance = new MetaSaveRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:MetaSaveRequestProto) - } - - public interface MetaSaveResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class MetaSaveResponseProto extends - com.google.protobuf.GeneratedMessage - implements MetaSaveResponseProtoOrBuilder { - // Use MetaSaveResponseProto.newBuilder() to construct. 
- private MetaSaveResponseProto(Builder builder) { - super(builder); - } - private MetaSaveResponseProto(boolean noInit) {} - - private static final MetaSaveResponseProto defaultInstance; - public static MetaSaveResponseProto getDefaultInstance() { - return defaultInstance; - } - - public MetaSaveResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_MetaSaveResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_MetaSaveResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto parseFrom( - byte[] data, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_MetaSaveResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_MetaSaveResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - 
maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:MetaSaveResponseProto) - } - - static { - defaultInstance = new MetaSaveResponseProto(true); - defaultInstance.initFields(); - } - - // 
@@protoc_insertion_point(class_scope:MetaSaveResponseProto) - } - - public interface GetFileInfoRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string src = 1; - boolean hasSrc(); - String getSrc(); - } - public static final class GetFileInfoRequestProto extends - com.google.protobuf.GeneratedMessage - implements GetFileInfoRequestProtoOrBuilder { - // Use GetFileInfoRequestProto.newBuilder() to construct. - private GetFileInfoRequestProto(Builder builder) { - super(builder); - } - private GetFileInfoRequestProto(boolean noInit) {} - - private static final GetFileInfoRequestProto defaultInstance; - public static GetFileInfoRequestProto getDefaultInstance() { - return defaultInstance; - } - - public GetFileInfoRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetFileInfoRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetFileInfoRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string src = 1; - public static final int SRC_FIELD_NUMBER = 1; - private java.lang.Object src_; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - src_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getSrcBytes() { - java.lang.Object ref = src_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - src_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - src_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasSrc()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getSrcBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getSrcBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto) obj; - - boolean result = true; - result = result && (hasSrc() == other.hasSrc()); - if (hasSrc()) { - result = result && getSrc() - .equals(other.getSrc()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasSrc()) { - hash = (37 * hash) + SRC_FIELD_NUMBER; - hash = (53 * hash) + getSrc().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto parseFrom( - 
com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetFileInfoRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetFileInfoRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - src_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw 
newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.src_ = src_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto.getDefaultInstance()) return this; - if (other.hasSrc()) { - setSrc(other.getSrc()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasSrc()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - src_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required string src = 1; - private java.lang.Object src_ = ""; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - src_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setSrc(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - return this; - } - public Builder clearSrc() { - bitField0_ = (bitField0_ & ~0x00000001); - src_ = getDefaultInstance().getSrc(); - onChanged(); - return this; - } - void setSrc(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:GetFileInfoRequestProto) - } - - static { - defaultInstance = new GetFileInfoRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetFileInfoRequestProto) - } - - public interface GetFileInfoResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .HdfsFileStatusProto fs = 1; - boolean hasFs(); - 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getFs(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getFsOrBuilder(); - } - public static final class GetFileInfoResponseProto extends - com.google.protobuf.GeneratedMessage - implements GetFileInfoResponseProtoOrBuilder { - // Use GetFileInfoResponseProto.newBuilder() to construct. - private GetFileInfoResponseProto(Builder builder) { - super(builder); - } - private GetFileInfoResponseProto(boolean noInit) {} - - private static final GetFileInfoResponseProto defaultInstance; - public static GetFileInfoResponseProto getDefaultInstance() { - return defaultInstance; - } - - public GetFileInfoResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetFileInfoResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetFileInfoResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required .HdfsFileStatusProto fs = 1; - public static final int FS_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto fs_; - public boolean hasFs() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getFs() { - return fs_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getFsOrBuilder() { - return fs_; - } - - private void initFields() { - fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasFs()) { - memoizedIsInitialized = 0; - return false; - } - if (!getFs().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, fs_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, fs_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto other = 
(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto) obj; - - boolean result = true; - result = result && (hasFs() == other.hasFs()); - if (hasFs()) { - result = result && getFs() - .equals(other.getFs()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasFs()) { - hash = (37 * hash) + FS_FIELD_NUMBER; - hash = (53 * hash) + getFs().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto parseFrom( - 
com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetFileInfoResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetFileInfoResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getFsFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (fsBuilder_ == null) { - fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); - } else { - fsBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return 
result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (fsBuilder_ == null) { - result.fs_ = fs_; - } else { - result.fs_ = fsBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.getDefaultInstance()) return this; - if (other.hasFs()) { - mergeFs(other.getFs()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasFs()) { - - return false; - } - if (!getFs().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.newBuilder(); - if (hasFs()) { - subBuilder.mergeFrom(getFs()); - } - input.readMessage(subBuilder, extensionRegistry); - setFs(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .HdfsFileStatusProto fs = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> fsBuilder_; - public boolean hasFs() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getFs() { - if (fsBuilder_ == null) { - return fs_; - } else { - return fsBuilder_.getMessage(); - } - } - public Builder setFs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { - if (fsBuilder_ == null) { - if (value == null) { - throw new 
NullPointerException(); - } - fs_ = value; - onChanged(); - } else { - fsBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setFs( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { - if (fsBuilder_ == null) { - fs_ = builderForValue.build(); - onChanged(); - } else { - fsBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeFs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { - if (fsBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - fs_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()) { - fs_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.newBuilder(fs_).mergeFrom(value).buildPartial(); - } else { - fs_ = value; - } - onChanged(); - } else { - fsBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearFs() { - if (fsBuilder_ == null) { - fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); - onChanged(); - } else { - fsBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder getFsBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getFsFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getFsOrBuilder() { - if (fsBuilder_ != null) { - return fsBuilder_.getMessageOrBuilder(); - } else { - return fs_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> - getFsFieldBuilder() { - if (fsBuilder_ == null) { - fsBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>( - fs_, - getParentForChildren(), - isClean()); - fs_ = null; - } - return fsBuilder_; - } - - // @@protoc_insertion_point(builder_scope:GetFileInfoResponseProto) - } - - static { - defaultInstance = new GetFileInfoResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetFileInfoResponseProto) - } - - public interface GetFileLinkInfoRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string src = 1; - boolean hasSrc(); - String getSrc(); - } - public static final class GetFileLinkInfoRequestProto extends - com.google.protobuf.GeneratedMessage - implements GetFileLinkInfoRequestProtoOrBuilder { - // Use GetFileLinkInfoRequestProto.newBuilder() to construct. 
- private GetFileLinkInfoRequestProto(Builder builder) { - super(builder); - } - private GetFileLinkInfoRequestProto(boolean noInit) {} - - private static final GetFileLinkInfoRequestProto defaultInstance; - public static GetFileLinkInfoRequestProto getDefaultInstance() { - return defaultInstance; - } - - public GetFileLinkInfoRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetFileLinkInfoRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetFileLinkInfoRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string src = 1; - public static final int SRC_FIELD_NUMBER = 1; - private java.lang.Object src_; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - src_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getSrcBytes() { - java.lang.Object ref = src_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - src_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - src_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasSrc()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getSrcBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getSrcBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto) obj; - - boolean result = true; - result = result && (hasSrc() == other.hasSrc()); - if (hasSrc()) { - result = result && getSrc() - 
.equals(other.getSrc()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasSrc()) { - hash = (37 * hash) + SRC_FIELD_NUMBER; - hash = (53 * hash) + getSrc().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, 
extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetFileLinkInfoRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetFileLinkInfoRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - src_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto result = new 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.src_ = src_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto.getDefaultInstance()) return this; - if (other.hasSrc()) { - setSrc(other.getSrc()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasSrc()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - src_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required string src = 1; - private java.lang.Object src_ = ""; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - src_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setSrc(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - return this; - } - public Builder clearSrc() { - bitField0_ = (bitField0_ & ~0x00000001); - src_ = getDefaultInstance().getSrc(); - onChanged(); - return this; - } - void setSrc(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:GetFileLinkInfoRequestProto) - } - - static { - defaultInstance = new GetFileLinkInfoRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetFileLinkInfoRequestProto) - } - - public interface GetFileLinkInfoResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .HdfsFileStatusProto fs = 1; - boolean hasFs(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getFs(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getFsOrBuilder(); - } - public static final class GetFileLinkInfoResponseProto extends - com.google.protobuf.GeneratedMessage - implements 
GetFileLinkInfoResponseProtoOrBuilder { - // Use GetFileLinkInfoResponseProto.newBuilder() to construct. - private GetFileLinkInfoResponseProto(Builder builder) { - super(builder); - } - private GetFileLinkInfoResponseProto(boolean noInit) {} - - private static final GetFileLinkInfoResponseProto defaultInstance; - public static GetFileLinkInfoResponseProto getDefaultInstance() { - return defaultInstance; - } - - public GetFileLinkInfoResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetFileLinkInfoResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetFileLinkInfoResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required .HdfsFileStatusProto fs = 1; - public static final int FS_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto fs_; - public boolean hasFs() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getFs() { - return fs_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getFsOrBuilder() { - return fs_; - } - - private void initFields() { - fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasFs()) { - memoizedIsInitialized = 0; - return false; - } - if (!getFs().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, fs_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, fs_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto) obj; - - boolean result = true; - result = result && (hasFs() == other.hasFs()); - if (hasFs()) { - result = result && getFs() - .equals(other.getFs()); - } - result = result && - 
getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasFs()) { - hash = (37 * hash) + FS_FIELD_NUMBER; - hash = (53 * hash) + getFs().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static 
Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetFileLinkInfoResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetFileLinkInfoResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getFsFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (fsBuilder_ == null) { - fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); - } else { - fsBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto buildPartial() { - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (fsBuilder_ == null) { - result.fs_ = fs_; - } else { - result.fs_ = fsBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.getDefaultInstance()) return this; - if (other.hasFs()) { - mergeFs(other.getFs()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasFs()) { - - return false; - } - if (!getFs().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.newBuilder(); - if (hasFs()) { - subBuilder.mergeFrom(getFs()); - } - input.readMessage(subBuilder, extensionRegistry); - setFs(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .HdfsFileStatusProto fs = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> fsBuilder_; - public boolean hasFs() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getFs() { - if (fsBuilder_ == null) { - return fs_; - } else { - return fsBuilder_.getMessage(); - } - } - public Builder setFs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { - if (fsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - fs_ = value; - onChanged(); - } else { - fsBuilder_.setMessage(value); - } - 
bitField0_ |= 0x00000001; - return this; - } - public Builder setFs( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { - if (fsBuilder_ == null) { - fs_ = builderForValue.build(); - onChanged(); - } else { - fsBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeFs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { - if (fsBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - fs_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()) { - fs_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.newBuilder(fs_).mergeFrom(value).buildPartial(); - } else { - fs_ = value; - } - onChanged(); - } else { - fsBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearFs() { - if (fsBuilder_ == null) { - fs_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); - onChanged(); - } else { - fsBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder getFsBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getFsFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getFsOrBuilder() { - if (fsBuilder_ != null) { - return fsBuilder_.getMessageOrBuilder(); - } else { - return fs_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> - getFsFieldBuilder() { - if (fsBuilder_ == null) { - fsBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>( - fs_, - getParentForChildren(), - isClean()); - fs_ = null; - } - return fsBuilder_; - } - - // @@protoc_insertion_point(builder_scope:GetFileLinkInfoResponseProto) - } - - static { - defaultInstance = new GetFileLinkInfoResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetFileLinkInfoResponseProto) - } - - public interface GetContentSummaryRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string path = 1; - boolean hasPath(); - String getPath(); - } - public static final class GetContentSummaryRequestProto extends - com.google.protobuf.GeneratedMessage - implements GetContentSummaryRequestProtoOrBuilder { - // Use GetContentSummaryRequestProto.newBuilder() to construct. 
- private GetContentSummaryRequestProto(Builder builder) { - super(builder); - } - private GetContentSummaryRequestProto(boolean noInit) {} - - private static final GetContentSummaryRequestProto defaultInstance; - public static GetContentSummaryRequestProto getDefaultInstance() { - return defaultInstance; - } - - public GetContentSummaryRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetContentSummaryRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetContentSummaryRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string path = 1; - public static final int PATH_FIELD_NUMBER = 1; - private java.lang.Object path_; - public boolean hasPath() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getPath() { - java.lang.Object ref = path_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - path_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getPathBytes() { - java.lang.Object ref = path_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - path_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - path_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasPath()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getPathBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getPathBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto) obj; - - boolean result = true; - result = result && (hasPath() == other.hasPath()); - if (hasPath()) { - 
result = result && getPath() - .equals(other.getPath()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasPath()) { - hash = (37 * hash) + PATH_FIELD_NUMBER; - hash = (53 * hash) + getPath().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { 
- return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetContentSummaryRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetContentSummaryRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - path_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto result = new 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.path_ = path_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto.getDefaultInstance()) return this; - if (other.hasPath()) { - setPath(other.getPath()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasPath()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - path_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required string path = 1; - private java.lang.Object path_ = ""; - public boolean hasPath() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getPath() { - java.lang.Object ref = path_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - path_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setPath(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - path_ = value; - onChanged(); - return this; - } - public Builder clearPath() { - bitField0_ = (bitField0_ & ~0x00000001); - path_ = getDefaultInstance().getPath(); - onChanged(); - return this; - } - void setPath(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - path_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:GetContentSummaryRequestProto) - } - - static { - defaultInstance = new GetContentSummaryRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetContentSummaryRequestProto) - } - - public interface GetContentSummaryResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .ContentSummaryProto summary = 1; - boolean hasSummary(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto getSummary(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProtoOrBuilder getSummaryOrBuilder(); - } - public static final class GetContentSummaryResponseProto 
extends - com.google.protobuf.GeneratedMessage - implements GetContentSummaryResponseProtoOrBuilder { - // Use GetContentSummaryResponseProto.newBuilder() to construct. - private GetContentSummaryResponseProto(Builder builder) { - super(builder); - } - private GetContentSummaryResponseProto(boolean noInit) {} - - private static final GetContentSummaryResponseProto defaultInstance; - public static GetContentSummaryResponseProto getDefaultInstance() { - return defaultInstance; - } - - public GetContentSummaryResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetContentSummaryResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetContentSummaryResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required .ContentSummaryProto summary = 1; - public static final int SUMMARY_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto summary_; - public boolean hasSummary() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto getSummary() { - return summary_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProtoOrBuilder getSummaryOrBuilder() { - return summary_; - } - - private void initFields() { - summary_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasSummary()) { - memoizedIsInitialized = 0; - return false; - } - if (!getSummary().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, summary_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, summary_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto) obj; - - boolean result = true; - result = result && 
(hasSummary() == other.hasSummary()); - if (hasSummary()) { - result = result && getSummary() - .equals(other.getSummary()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasSummary()) { - hash = (37 * hash) + SUMMARY_FIELD_NUMBER; - hash = (53 * hash) + getSummary().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetContentSummaryResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetContentSummaryResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getSummaryFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (summaryBuilder_ == null) { - summary_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance(); - } else { - summaryBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - 
result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (summaryBuilder_ == null) { - result.summary_ = summary_; - } else { - result.summary_ = summaryBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.getDefaultInstance()) return this; - if (other.hasSummary()) { - mergeSummary(other.getSummary()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasSummary()) { - - return false; - } - if (!getSummary().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.newBuilder(); - if (hasSummary()) { - subBuilder.mergeFrom(getSummary()); - } - input.readMessage(subBuilder, extensionRegistry); - setSummary(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .ContentSummaryProto summary = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto summary_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProtoOrBuilder> summaryBuilder_; - public boolean hasSummary() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto getSummary() { - if (summaryBuilder_ == null) { - return summary_; - } else { - return 
summaryBuilder_.getMessage(); - } - } - public Builder setSummary(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto value) { - if (summaryBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - summary_ = value; - onChanged(); - } else { - summaryBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setSummary( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder builderForValue) { - if (summaryBuilder_ == null) { - summary_ = builderForValue.build(); - onChanged(); - } else { - summaryBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeSummary(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto value) { - if (summaryBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - summary_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance()) { - summary_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.newBuilder(summary_).mergeFrom(value).buildPartial(); - } else { - summary_ = value; - } - onChanged(); - } else { - summaryBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearSummary() { - if (summaryBuilder_ == null) { - summary_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance(); - onChanged(); - } else { - summaryBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder getSummaryBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getSummaryFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProtoOrBuilder getSummaryOrBuilder() { - if (summaryBuilder_ != null) { - return summaryBuilder_.getMessageOrBuilder(); - } else { - return summary_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProtoOrBuilder> - getSummaryFieldBuilder() { - if (summaryBuilder_ == null) { - summaryBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProtoOrBuilder>( - summary_, - getParentForChildren(), - isClean()); - summary_ = null; - } - return summaryBuilder_; - } - - // @@protoc_insertion_point(builder_scope:GetContentSummaryResponseProto) - } - - static { - defaultInstance = new GetContentSummaryResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetContentSummaryResponseProto) - } - - public interface SetQuotaRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string path = 1; - boolean hasPath(); - String getPath(); - - // required uint64 namespaceQuota = 2; - boolean hasNamespaceQuota(); - long getNamespaceQuota(); - - // required uint64 diskspaceQuota = 3; - boolean hasDiskspaceQuota(); - long getDiskspaceQuota(); - } - public static final class SetQuotaRequestProto extends - com.google.protobuf.GeneratedMessage - implements SetQuotaRequestProtoOrBuilder { - // 
Use SetQuotaRequestProto.newBuilder() to construct. - private SetQuotaRequestProto(Builder builder) { - super(builder); - } - private SetQuotaRequestProto(boolean noInit) {} - - private static final SetQuotaRequestProto defaultInstance; - public static SetQuotaRequestProto getDefaultInstance() { - return defaultInstance; - } - - public SetQuotaRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetQuotaRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetQuotaRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string path = 1; - public static final int PATH_FIELD_NUMBER = 1; - private java.lang.Object path_; - public boolean hasPath() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getPath() { - java.lang.Object ref = path_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - path_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getPathBytes() { - java.lang.Object ref = path_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - path_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required uint64 namespaceQuota = 2; - public static final int NAMESPACEQUOTA_FIELD_NUMBER = 2; - private long namespaceQuota_; - public boolean hasNamespaceQuota() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getNamespaceQuota() { - return namespaceQuota_; - } - - // required uint64 diskspaceQuota = 3; - public static final int DISKSPACEQUOTA_FIELD_NUMBER = 3; - private long diskspaceQuota_; - public boolean hasDiskspaceQuota() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getDiskspaceQuota() { - return diskspaceQuota_; - } - - private void initFields() { - path_ = ""; - namespaceQuota_ = 0L; - diskspaceQuota_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasPath()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasNamespaceQuota()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasDiskspaceQuota()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getPathBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, namespaceQuota_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt64(3, diskspaceQuota_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 
0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getPathBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, namespaceQuota_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(3, diskspaceQuota_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto) obj; - - boolean result = true; - result = result && (hasPath() == other.hasPath()); - if (hasPath()) { - result = result && getPath() - .equals(other.getPath()); - } - result = result && (hasNamespaceQuota() == other.hasNamespaceQuota()); - if (hasNamespaceQuota()) { - result = result && (getNamespaceQuota() - == other.getNamespaceQuota()); - } - result = result && (hasDiskspaceQuota() == other.hasDiskspaceQuota()); - if (hasDiskspaceQuota()) { - result = result && (getDiskspaceQuota() - == other.getDiskspaceQuota()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasPath()) { - hash = (37 * hash) + PATH_FIELD_NUMBER; - hash = (53 * hash) + getPath().hashCode(); - } - if (hasNamespaceQuota()) { - hash = (37 * hash) + NAMESPACEQUOTA_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getNamespaceQuota()); - } - if (hasDiskspaceQuota()) { - hash = (37 * hash) + DISKSPACEQUOTA_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getDiskspaceQuota()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - 
.buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetQuotaRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetQuotaRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder 
create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - path_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - namespaceQuota_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - diskspaceQuota_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.path_ = path_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.namespaceQuota_ = namespaceQuota_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.diskspaceQuota_ = diskspaceQuota_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto.getDefaultInstance()) return this; - if (other.hasPath()) { - setPath(other.getPath()); - } - if (other.hasNamespaceQuota()) { - setNamespaceQuota(other.getNamespaceQuota()); - } - if (other.hasDiskspaceQuota()) { - setDiskspaceQuota(other.getDiskspaceQuota()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasPath()) { - - return false; - } - if (!hasNamespaceQuota()) { - - return false; - } - if (!hasDiskspaceQuota()) { - - return false; - } - return true; - } 
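
Like GetContentSummaryRequestProto above, SetQuotaRequestProto declares all three of its fields (path, namespaceQuota, diskspaceQuota) as proto2 required, so the generated isInitialized()/buildParsed() pair rejects an incomplete message at build time and at parse time alike. A minimal sketch of what that contract gives a caller, assuming the regenerated classes keep this same proto2 builder API (path and quota values below are illustrative only):

    import com.google.protobuf.InvalidProtocolBufferException;
    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto;

    // Building a request: the Builder fluently sets each required
    // field; build() throws an unchecked UninitializedMessageException
    // if any of them is left unset.
    SetQuotaRequestProto req = SetQuotaRequestProto.newBuilder()
        .setPath("/user/alice")        // required string path = 1
        .setNamespaceQuota(100000L)    // required uint64 namespaceQuota = 2
        .setDiskspaceQuota(1L << 40)   // required uint64 diskspaceQuota = 3
        .build();

    // Parsing enforces the same contract: bytes that omit a required
    // field (an empty buffer, say) fail with
    // InvalidProtocolBufferException instead of yielding a
    // half-initialized message.
    try {
      SetQuotaRequestProto.parseFrom(new byte[0]);
    } catch (InvalidProtocolBufferException expected) {
      // path, namespaceQuota and diskspaceQuota are all missing
    }
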
- - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - path_ = input.readBytes(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - namespaceQuota_ = input.readUInt64(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - diskspaceQuota_ = input.readUInt64(); - break; - } - } - } - } - - private int bitField0_; - - // required string path = 1; - private java.lang.Object path_ = ""; - public boolean hasPath() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getPath() { - java.lang.Object ref = path_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - path_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setPath(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - path_ = value; - onChanged(); - return this; - } - public Builder clearPath() { - bitField0_ = (bitField0_ & ~0x00000001); - path_ = getDefaultInstance().getPath(); - onChanged(); - return this; - } - void setPath(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - path_ = value; - onChanged(); - } - - // required uint64 namespaceQuota = 2; - private long namespaceQuota_ ; - public boolean hasNamespaceQuota() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getNamespaceQuota() { - return namespaceQuota_; - } - public Builder setNamespaceQuota(long value) { - bitField0_ |= 0x00000002; - namespaceQuota_ = value; - onChanged(); - return this; - } - public Builder clearNamespaceQuota() { - bitField0_ = (bitField0_ & ~0x00000002); - namespaceQuota_ = 0L; - onChanged(); - return this; - } - - // required uint64 diskspaceQuota = 3; - private long diskspaceQuota_ ; - public boolean hasDiskspaceQuota() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getDiskspaceQuota() { - return diskspaceQuota_; - } - public Builder setDiskspaceQuota(long value) { - bitField0_ |= 0x00000004; - diskspaceQuota_ = value; - onChanged(); - return this; - } - public Builder clearDiskspaceQuota() { - bitField0_ = (bitField0_ & ~0x00000004); - diskspaceQuota_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:SetQuotaRequestProto) - } - - static { - defaultInstance = new SetQuotaRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:SetQuotaRequestProto) - } - - public interface SetQuotaResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class SetQuotaResponseProto extends - com.google.protobuf.GeneratedMessage - implements SetQuotaResponseProtoOrBuilder { - // Use SetQuotaResponseProto.newBuilder() to construct. 
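
SetQuotaResponseProto, which begins here, is a deliberately empty message: it declares no fields and exists purely as a typed acknowledgement for the RPC. One consequence, sketched under the assumption that the regenerated class keeps the same shape, is that a default instance serializes to zero bytes:

    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto;

    // No fields means nothing to write: only unknown fields could
    // contribute to the wire size, and a default instance has none.
    SetQuotaResponseProto ack = SetQuotaResponseProto.getDefaultInstance();
    assert ack.getSerializedSize() == 0;
    assert ack.isInitialized();  // trivially true: no required fields
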
- private SetQuotaResponseProto(Builder builder) { - super(builder); - } - private SetQuotaResponseProto(boolean noInit) {} - - private static final SetQuotaResponseProto defaultInstance; - public static SetQuotaResponseProto getDefaultInstance() { - return defaultInstance; - } - - public SetQuotaResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetQuotaResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetQuotaResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto parseFrom( - byte[] data, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetQuotaResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetQuotaResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - 
maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:SetQuotaResponseProto) - } - - static { - defaultInstance = new SetQuotaResponseProto(true); - defaultInstance.initFields(); - } - - // 
@@protoc_insertion_point(class_scope:SetQuotaResponseProto) - } - - public interface FsyncRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string src = 1; - boolean hasSrc(); - String getSrc(); - - // required string client = 2; - boolean hasClient(); - String getClient(); - } - public static final class FsyncRequestProto extends - com.google.protobuf.GeneratedMessage - implements FsyncRequestProtoOrBuilder { - // Use FsyncRequestProto.newBuilder() to construct. - private FsyncRequestProto(Builder builder) { - super(builder); - } - private FsyncRequestProto(boolean noInit) {} - - private static final FsyncRequestProto defaultInstance; - public static FsyncRequestProto getDefaultInstance() { - return defaultInstance; - } - - public FsyncRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_FsyncRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_FsyncRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string src = 1; - public static final int SRC_FIELD_NUMBER = 1; - private java.lang.Object src_; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - src_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getSrcBytes() { - java.lang.Object ref = src_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - src_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string client = 2; - public static final int CLIENT_FIELD_NUMBER = 2; - private java.lang.Object client_; - public boolean hasClient() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getClient() { - java.lang.Object ref = client_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - client_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getClientBytes() { - java.lang.Object ref = client_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - client_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - src_ = ""; - client_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasSrc()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasClient()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws 
java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getClientBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getClientBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto) obj; - - boolean result = true; - result = result && (hasSrc() == other.hasSrc()); - if (hasSrc()) { - result = result && getSrc() - .equals(other.getSrc()); - } - result = result && (hasClient() == other.hasClient()); - if (hasClient()) { - result = result && getClient() - .equals(other.getClient()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasSrc()) { - hash = (37 * hash) + SRC_FIELD_NUMBER; - hash = (53 * hash) + getSrc().hashCode(); - } - if (hasClient()) { - hash = (37 * hash) + CLIENT_FIELD_NUMBER; - hash = (53 * hash) + getClient().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_FsyncRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_FsyncRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - 
super.clear(); - src_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - client_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.src_ = src_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.client_ = client_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto.getDefaultInstance()) return this; - if (other.hasSrc()) { - setSrc(other.getSrc()); - } - if (other.hasClient()) { - setClient(other.getClient()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasSrc()) { - - return false; - } - if (!hasClient()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, 
unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - src_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - client_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required string src = 1; - private java.lang.Object src_ = ""; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - src_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setSrc(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - return this; - } - public Builder clearSrc() { - bitField0_ = (bitField0_ & ~0x00000001); - src_ = getDefaultInstance().getSrc(); - onChanged(); - return this; - } - void setSrc(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - } - - // required string client = 2; - private java.lang.Object client_ = ""; - public boolean hasClient() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getClient() { - java.lang.Object ref = client_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - client_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setClient(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - client_ = value; - onChanged(); - return this; - } - public Builder clearClient() { - bitField0_ = (bitField0_ & ~0x00000002); - client_ = getDefaultInstance().getClient(); - onChanged(); - return this; - } - void setClient(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000002; - client_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:FsyncRequestProto) - } - - static { - defaultInstance = new FsyncRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:FsyncRequestProto) - } - - public interface FsyncResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class FsyncResponseProto extends - com.google.protobuf.GeneratedMessage - implements FsyncResponseProtoOrBuilder { - // Use FsyncResponseProto.newBuilder() to construct. 
- private FsyncResponseProto(Builder builder) { - super(builder); - } - private FsyncResponseProto(boolean noInit) {} - - private static final FsyncResponseProto defaultInstance; - public static FsyncResponseProto getDefaultInstance() { - return defaultInstance; - } - - public FsyncResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_FsyncResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_FsyncResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_FsyncResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_FsyncResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if 
(com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:FsyncResponseProto) - } - - static { - defaultInstance = new FsyncResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:FsyncResponseProto) - } - - public interface SetTimesRequestProtoOrBuilder - extends 
com.google.protobuf.MessageOrBuilder { - - // required string src = 1; - boolean hasSrc(); - String getSrc(); - - // required uint64 mtime = 2; - boolean hasMtime(); - long getMtime(); - - // required uint64 atime = 3; - boolean hasAtime(); - long getAtime(); - } - public static final class SetTimesRequestProto extends - com.google.protobuf.GeneratedMessage - implements SetTimesRequestProtoOrBuilder { - // Use SetTimesRequestProto.newBuilder() to construct. - private SetTimesRequestProto(Builder builder) { - super(builder); - } - private SetTimesRequestProto(boolean noInit) {} - - private static final SetTimesRequestProto defaultInstance; - public static SetTimesRequestProto getDefaultInstance() { - return defaultInstance; - } - - public SetTimesRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetTimesRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetTimesRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string src = 1; - public static final int SRC_FIELD_NUMBER = 1; - private java.lang.Object src_; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - src_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getSrcBytes() { - java.lang.Object ref = src_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - src_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required uint64 mtime = 2; - public static final int MTIME_FIELD_NUMBER = 2; - private long mtime_; - public boolean hasMtime() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getMtime() { - return mtime_; - } - - // required uint64 atime = 3; - public static final int ATIME_FIELD_NUMBER = 3; - private long atime_; - public boolean hasAtime() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getAtime() { - return atime_; - } - - private void initFields() { - src_ = ""; - mtime_ = 0L; - atime_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasSrc()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasMtime()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasAtime()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, mtime_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt64(3, atime_); - } - 
getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getSrcBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, mtime_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(3, atime_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto) obj; - - boolean result = true; - result = result && (hasSrc() == other.hasSrc()); - if (hasSrc()) { - result = result && getSrc() - .equals(other.getSrc()); - } - result = result && (hasMtime() == other.hasMtime()); - if (hasMtime()) { - result = result && (getMtime() - == other.getMtime()); - } - result = result && (hasAtime() == other.hasAtime()); - if (hasAtime()) { - result = result && (getAtime() - == other.getAtime()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasSrc()) { - hash = (37 * hash) + SRC_FIELD_NUMBER; - hash = (53 * hash) + getSrc().hashCode(); - } - if (hasMtime()) { - hash = (37 * hash) + MTIME_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getMtime()); - } - if (hasAtime()) { - hash = (37 * hash) + ATIME_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getAtime()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetTimesRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetTimesRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void 
maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - src_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - mtime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - atime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.src_ = src_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.mtime_ = mtime_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.atime_ = atime_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto.getDefaultInstance()) return this; - if (other.hasSrc()) { - setSrc(other.getSrc()); - } - if (other.hasMtime()) { - setMtime(other.getMtime()); - } - if (other.hasAtime()) { - setAtime(other.getAtime()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasSrc()) { - - return false; - } - if (!hasMtime()) { - - return false; - } - if (!hasAtime()) { - - return false; - } - return 
true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - src_ = input.readBytes(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - mtime_ = input.readUInt64(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - atime_ = input.readUInt64(); - break; - } - } - } - } - - private int bitField0_; - - // required string src = 1; - private java.lang.Object src_ = ""; - public boolean hasSrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getSrc() { - java.lang.Object ref = src_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - src_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setSrc(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - return this; - } - public Builder clearSrc() { - bitField0_ = (bitField0_ & ~0x00000001); - src_ = getDefaultInstance().getSrc(); - onChanged(); - return this; - } - void setSrc(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - src_ = value; - onChanged(); - } - - // required uint64 mtime = 2; - private long mtime_ ; - public boolean hasMtime() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getMtime() { - return mtime_; - } - public Builder setMtime(long value) { - bitField0_ |= 0x00000002; - mtime_ = value; - onChanged(); - return this; - } - public Builder clearMtime() { - bitField0_ = (bitField0_ & ~0x00000002); - mtime_ = 0L; - onChanged(); - return this; - } - - // required uint64 atime = 3; - private long atime_ ; - public boolean hasAtime() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getAtime() { - return atime_; - } - public Builder setAtime(long value) { - bitField0_ |= 0x00000004; - atime_ = value; - onChanged(); - return this; - } - public Builder clearAtime() { - bitField0_ = (bitField0_ & ~0x00000004); - atime_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:SetTimesRequestProto) - } - - static { - defaultInstance = new SetTimesRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:SetTimesRequestProto) - } - - public interface SetTimesResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class SetTimesResponseProto extends - com.google.protobuf.GeneratedMessage - implements SetTimesResponseProtoOrBuilder { - // Use SetTimesResponseProto.newBuilder() to construct. 
- private SetTimesResponseProto(Builder builder) { - super(builder); - } - private SetTimesResponseProto(boolean noInit) {} - - private static final SetTimesResponseProto defaultInstance; - public static SetTimesResponseProto getDefaultInstance() { - return defaultInstance; - } - - public SetTimesResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetTimesResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetTimesResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto parseFrom( - byte[] data, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetTimesResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetTimesResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - 
maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:SetTimesResponseProto) - } - - static { - defaultInstance = new SetTimesResponseProto(true); - defaultInstance.initFields(); - } - - // 
@@protoc_insertion_point(class_scope:SetTimesResponseProto) - } - - public interface CreateSymlinkRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string target = 1; - boolean hasTarget(); - String getTarget(); - - // required string link = 2; - boolean hasLink(); - String getLink(); - - // required .FsPermissionProto dirPerm = 3; - boolean hasDirPerm(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getDirPerm(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getDirPermOrBuilder(); - - // required bool createParent = 4; - boolean hasCreateParent(); - boolean getCreateParent(); - } - public static final class CreateSymlinkRequestProto extends - com.google.protobuf.GeneratedMessage - implements CreateSymlinkRequestProtoOrBuilder { - // Use CreateSymlinkRequestProto.newBuilder() to construct. - private CreateSymlinkRequestProto(Builder builder) { - super(builder); - } - private CreateSymlinkRequestProto(boolean noInit) {} - - private static final CreateSymlinkRequestProto defaultInstance; - public static CreateSymlinkRequestProto getDefaultInstance() { - return defaultInstance; - } - - public CreateSymlinkRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_CreateSymlinkRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_CreateSymlinkRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string target = 1; - public static final int TARGET_FIELD_NUMBER = 1; - private java.lang.Object target_; - public boolean hasTarget() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getTarget() { - java.lang.Object ref = target_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - target_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getTargetBytes() { - java.lang.Object ref = target_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - target_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string link = 2; - public static final int LINK_FIELD_NUMBER = 2; - private java.lang.Object link_; - public boolean hasLink() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getLink() { - java.lang.Object ref = link_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - link_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getLinkBytes() { - java.lang.Object ref = link_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - link_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required .FsPermissionProto dirPerm = 3; - public static final int 
DIRPERM_FIELD_NUMBER = 3; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto dirPerm_; - public boolean hasDirPerm() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getDirPerm() { - return dirPerm_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getDirPermOrBuilder() { - return dirPerm_; - } - - // required bool createParent = 4; - public static final int CREATEPARENT_FIELD_NUMBER = 4; - private boolean createParent_; - public boolean hasCreateParent() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public boolean getCreateParent() { - return createParent_; - } - - private void initFields() { - target_ = ""; - link_ = ""; - dirPerm_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); - createParent_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasTarget()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasLink()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasDirPerm()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasCreateParent()) { - memoizedIsInitialized = 0; - return false; - } - if (!getDirPerm().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getTargetBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getLinkBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeMessage(3, dirPerm_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeBool(4, createParent_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getTargetBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getLinkBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, dirPerm_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(4, createParent_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto other = 
(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto) obj; - - boolean result = true; - result = result && (hasTarget() == other.hasTarget()); - if (hasTarget()) { - result = result && getTarget() - .equals(other.getTarget()); - } - result = result && (hasLink() == other.hasLink()); - if (hasLink()) { - result = result && getLink() - .equals(other.getLink()); - } - result = result && (hasDirPerm() == other.hasDirPerm()); - if (hasDirPerm()) { - result = result && getDirPerm() - .equals(other.getDirPerm()); - } - result = result && (hasCreateParent() == other.hasCreateParent()); - if (hasCreateParent()) { - result = result && (getCreateParent() - == other.getCreateParent()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasTarget()) { - hash = (37 * hash) + TARGET_FIELD_NUMBER; - hash = (53 * hash) + getTarget().hashCode(); - } - if (hasLink()) { - hash = (37 * hash) + LINK_FIELD_NUMBER; - hash = (53 * hash) + getLink().hashCode(); - } - if (hasDirPerm()) { - hash = (37 * hash) + DIRPERM_FIELD_NUMBER; - hash = (53 * hash) + getDirPerm().hashCode(); - } - if (hasCreateParent()) { - hash = (37 * hash) + CREATEPARENT_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getCreateParent()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if 
(builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_CreateSymlinkRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_CreateSymlinkRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getDirPermFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - target_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - link_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - if (dirPermBuilder_ == null) { - dirPerm_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); - } else { - dirPermBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000004); - createParent_ = false; - bitField0_ = (bitField0_ & ~0x00000008); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.target_ = target_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.link_ = link_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - if (dirPermBuilder_ == null) { - result.dirPerm_ = dirPerm_; - } else { - result.dirPerm_ = dirPermBuilder_.build(); - } - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.createParent_ = createParent_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto.getDefaultInstance()) return this; - if (other.hasTarget()) { - setTarget(other.getTarget()); - } - if (other.hasLink()) { - setLink(other.getLink()); - } - if (other.hasDirPerm()) { - mergeDirPerm(other.getDirPerm()); - } - if (other.hasCreateParent()) { - setCreateParent(other.getCreateParent()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasTarget()) { - - return false; - } - if (!hasLink()) { - - return false; - } - if (!hasDirPerm()) { - - return false; - } - if (!hasCreateParent()) { - - return false; - } - if (!getDirPerm().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - 
com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - target_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - link_ = input.readBytes(); - break; - } - case 26: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.newBuilder(); - if (hasDirPerm()) { - subBuilder.mergeFrom(getDirPerm()); - } - input.readMessage(subBuilder, extensionRegistry); - setDirPerm(subBuilder.buildPartial()); - break; - } - case 32: { - bitField0_ |= 0x00000008; - createParent_ = input.readBool(); - break; - } - } - } - } - - private int bitField0_; - - // required string target = 1; - private java.lang.Object target_ = ""; - public boolean hasTarget() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getTarget() { - java.lang.Object ref = target_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - target_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setTarget(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - target_ = value; - onChanged(); - return this; - } - public Builder clearTarget() { - bitField0_ = (bitField0_ & ~0x00000001); - target_ = getDefaultInstance().getTarget(); - onChanged(); - return this; - } - void setTarget(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - target_ = value; - onChanged(); - } - - // required string link = 2; - private java.lang.Object link_ = ""; - public boolean hasLink() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getLink() { - java.lang.Object ref = link_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - link_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setLink(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - link_ = value; - onChanged(); - return this; - } - public Builder clearLink() { - bitField0_ = (bitField0_ & ~0x00000002); - link_ = getDefaultInstance().getLink(); - onChanged(); - return this; - } - void setLink(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000002; - link_ = value; - onChanged(); - } - - // required .FsPermissionProto dirPerm = 3; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto dirPerm_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder> dirPermBuilder_; - public boolean hasDirPerm() { - return ((bitField0_ & 0x00000004) == 
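The case labels in the mergeFrom(CodedInputStream, ...) loop above are precomputed wire tags, not field numbers: tag = (field_number << 3) | wire_type, where wire type 2 marks length-delimited data (strings and embedded messages) and 0 marks varints. Tag 0 can never start a real field, since field numbers begin at 1, which is why case 0 terminates the loop. Checking the arithmetic against CreateSymlinkRequestProto's four fields (sketch class name illustrative):

    public class WireTags {
      public static void main(String[] args) {
        // tag = (field_number << 3) | wire_type; 2 = length-delimited, 0 = varint
        System.out.println((1 << 3) | 2); // 10: string target   ("case 10" above)
        System.out.println((2 << 3) | 2); // 18: string link     ("case 18")
        System.out.println((3 << 3) | 2); // 26: message dirPerm ("case 26")
        System.out.println((4 << 3) | 0); // 32: bool createParent ("case 32")
      }
    }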
0x00000004); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getDirPerm() { - if (dirPermBuilder_ == null) { - return dirPerm_; - } else { - return dirPermBuilder_.getMessage(); - } - } - public Builder setDirPerm(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto value) { - if (dirPermBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - dirPerm_ = value; - onChanged(); - } else { - dirPermBuilder_.setMessage(value); - } - bitField0_ |= 0x00000004; - return this; - } - public Builder setDirPerm( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder builderForValue) { - if (dirPermBuilder_ == null) { - dirPerm_ = builderForValue.build(); - onChanged(); - } else { - dirPermBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000004; - return this; - } - public Builder mergeDirPerm(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto value) { - if (dirPermBuilder_ == null) { - if (((bitField0_ & 0x00000004) == 0x00000004) && - dirPerm_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance()) { - dirPerm_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.newBuilder(dirPerm_).mergeFrom(value).buildPartial(); - } else { - dirPerm_ = value; - } - onChanged(); - } else { - dirPermBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000004; - return this; - } - public Builder clearDirPerm() { - if (dirPermBuilder_ == null) { - dirPerm_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); - onChanged(); - } else { - dirPermBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder getDirPermBuilder() { - bitField0_ |= 0x00000004; - onChanged(); - return getDirPermFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getDirPermOrBuilder() { - if (dirPermBuilder_ != null) { - return dirPermBuilder_.getMessageOrBuilder(); - } else { - return dirPerm_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder> - getDirPermFieldBuilder() { - if (dirPermBuilder_ == null) { - dirPermBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder>( - dirPerm_, - getParentForChildren(), - isClean()); - dirPerm_ = null; - } - return dirPermBuilder_; - } - - // required bool createParent = 4; - private boolean createParent_ ; - public boolean hasCreateParent() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public boolean getCreateParent() { - return createParent_; - } - public Builder setCreateParent(boolean value) { - bitField0_ |= 0x00000008; - createParent_ = value; - onChanged(); - return this; - } - public Builder clearCreateParent() { - bitField0_ = (bitField0_ & ~0x00000008); - createParent_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:CreateSymlinkRequestProto) - } - - static { - defaultInstance = new 
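The Builder just closed tracks field presence in bitField0_, one bit per field (0x1 target, 0x2 link, 0x4 dirPerm, 0x8 createParent); build() enforces all four required fields, while buildPartial() skips the check. A hedged sketch of the difference (class name and field values illustrative):

    import com.google.protobuf.UninitializedMessageException;
    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;

    public class RequiredFieldsSketch {
      public static void main(String[] args) {
        // Only bits 0x1 (target) and 0x2 (link) get set; dirPerm (0x4) and
        // createParent (0x8) stay clear in bitField0_.
        CreateSymlinkRequestProto.Builder b =
            CreateSymlinkRequestProto.newBuilder().setTarget("/a").setLink("/b");

        System.out.println(b.isInitialized()); // false
        b.buildPartial();                      // allowed: skips the initialization check
        try {
          b.build();                           // enforces the four required fields
        } catch (UninitializedMessageException e) {
          System.out.println(e.getMessage());  // names the missing fields
        }
      }
    }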
CreateSymlinkRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:CreateSymlinkRequestProto) - } - - public interface CreateSymlinkResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class CreateSymlinkResponseProto extends - com.google.protobuf.GeneratedMessage - implements CreateSymlinkResponseProtoOrBuilder { - // Use CreateSymlinkResponseProto.newBuilder() to construct. - private CreateSymlinkResponseProto(Builder builder) { - super(builder); - } - private CreateSymlinkResponseProto(boolean noInit) {} - - private static final CreateSymlinkResponseProto defaultInstance; - public static CreateSymlinkResponseProto getDefaultInstance() { - return defaultInstance; - } - - public CreateSymlinkResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_CreateSymlinkResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_CreateSymlinkResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
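CreateSymlinkResponseProto above carries no fields at all, so a well-formed instance serializes to zero bytes and any empty payload parses successfully; only unknown fields could contribute to its size or equality. A small sketch (class name illustrative):

    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto;

    public class EmptyMessageSketch {
      public static void main(String[] args) throws Exception {
        CreateSymlinkResponseProto r = CreateSymlinkResponseProto.getDefaultInstance();
        System.out.println(r.getSerializedSize());             // 0: no fields, no unknown fields
        CreateSymlinkResponseProto parsed =
            CreateSymlinkResponseProto.parseFrom(new byte[0]); // an empty payload is valid
        System.out.println(parsed.equals(r));                  // true: only unknown fields compared
      }
    }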
com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_CreateSymlinkResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_CreateSymlinkResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - 
throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:CreateSymlinkResponseProto) - } - - static { - defaultInstance = new CreateSymlinkResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:CreateSymlinkResponseProto) - } - - public interface GetLinkTargetRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string path = 1; - boolean hasPath(); - String getPath(); - } - public static final class GetLinkTargetRequestProto extends - com.google.protobuf.GeneratedMessage - implements GetLinkTargetRequestProtoOrBuilder { - // Use GetLinkTargetRequestProto.newBuilder() to construct. - private GetLinkTargetRequestProto(Builder builder) { - super(builder); - } - private GetLinkTargetRequestProto(boolean noInit) {} - - private static final GetLinkTargetRequestProto defaultInstance; - public static GetLinkTargetRequestProto getDefaultInstance() { - return defaultInstance; - } - - public GetLinkTargetRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetLinkTargetRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetLinkTargetRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string path = 1; - public static final int PATH_FIELD_NUMBER = 1; - private java.lang.Object path_; - public boolean hasPath() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getPath() { - java.lang.Object ref = path_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - path_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getPathBytes() { - java.lang.Object ref = path_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - path_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - path_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasPath()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getPathBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = 
-1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getPathBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto) obj; - - boolean result = true; - result = result && (hasPath() == other.hasPath()); - if (hasPath()) { - result = result && getPath() - .equals(other.getPath()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasPath()) { - hash = (37 * hash) + PATH_FIELD_NUMBER; - hash = (53 * hash) + getPath().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto parseDelimitedFrom(java.io.InputStream input) - 
throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetLinkTargetRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetLinkTargetRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - path_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto getDefaultInstanceForType() { - return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.path_ = path_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto.getDefaultInstance()) return this; - if (other.hasPath()) { - setPath(other.getPath()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasPath()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - path_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required string path = 1; - private java.lang.Object path_ = ""; - public boolean hasPath() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getPath() { - java.lang.Object ref = path_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - path_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setPath(String value) { - 
if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - path_ = value; - onChanged(); - return this; - } - public Builder clearPath() { - bitField0_ = (bitField0_ & ~0x00000001); - path_ = getDefaultInstance().getPath(); - onChanged(); - return this; - } - void setPath(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - path_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:GetLinkTargetRequestProto) - } - - static { - defaultInstance = new GetLinkTargetRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetLinkTargetRequestProto) - } - - public interface GetLinkTargetResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string targetPath = 1; - boolean hasTargetPath(); - String getTargetPath(); - } - public static final class GetLinkTargetResponseProto extends - com.google.protobuf.GeneratedMessage - implements GetLinkTargetResponseProtoOrBuilder { - // Use GetLinkTargetResponseProto.newBuilder() to construct. - private GetLinkTargetResponseProto(Builder builder) { - super(builder); - } - private GetLinkTargetResponseProto(boolean noInit) {} - - private static final GetLinkTargetResponseProto defaultInstance; - public static GetLinkTargetResponseProto getDefaultInstance() { - return defaultInstance; - } - - public GetLinkTargetResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetLinkTargetResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetLinkTargetResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required string targetPath = 1; - public static final int TARGETPATH_FIELD_NUMBER = 1; - private java.lang.Object targetPath_; - public boolean hasTargetPath() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getTargetPath() { - java.lang.Object ref = targetPath_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - targetPath_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getTargetPathBytes() { - java.lang.Object ref = targetPath_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - targetPath_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - targetPath_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasTargetPath()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getTargetPathBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - 
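String fields in these messages are stored as a java.lang.Object that holds either the raw ByteString from the wire or, once decoded, the String itself; the getter decodes lazily and caches the String only when the bytes are valid UTF-8, as getTargetPath() above shows. A round-trip sketch over GetLinkTargetRequestProto (class name and path illustrative):

    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto;

    public class StringFieldSketch {
      public static void main(String[] args) throws Exception {
        GetLinkTargetRequestProto req = GetLinkTargetRequestProto.newBuilder()
            .setPath("/user/alice/latest")   // illustrative path
            .build();
        byte[] wire = req.toByteArray();     // tag 10, varint length, UTF-8 bytes
        GetLinkTargetRequestProto back = GetLinkTargetRequestProto.parseFrom(wire);
        // The first getPath() decodes the ByteString and, since it is valid
        // UTF-8, caches the String in path_ for later calls.
        System.out.println(back.getPath().equals(req.getPath())); // true
      }
    }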
public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getTargetPathBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto) obj; - - boolean result = true; - result = result && (hasTargetPath() == other.hasTargetPath()); - if (hasTargetPath()) { - result = result && getTargetPath() - .equals(other.getTargetPath()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasTargetPath()) { - hash = (37 * hash) + TARGETPATH_FIELD_NUMBER; - hash = (53 * hash) + getTargetPath().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetLinkTargetResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetLinkTargetResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - targetPath_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.getDescriptor(); - } - - public 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.targetPath_ = targetPath_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.getDefaultInstance()) return this; - if (other.hasTargetPath()) { - setTargetPath(other.getTargetPath()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasTargetPath()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - targetPath_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required string targetPath = 1; - private java.lang.Object targetPath_ = ""; - public boolean hasTargetPath() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getTargetPath() { - java.lang.Object ref = 
targetPath_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - targetPath_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setTargetPath(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - targetPath_ = value; - onChanged(); - return this; - } - public Builder clearTargetPath() { - bitField0_ = (bitField0_ & ~0x00000001); - targetPath_ = getDefaultInstance().getTargetPath(); - onChanged(); - return this; - } - void setTargetPath(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - targetPath_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:GetLinkTargetResponseProto) - } - - static { - defaultInstance = new GetLinkTargetResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetLinkTargetResponseProto) - } - - public interface UpdateBlockForPipelineRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .ExtendedBlockProto block = 1; - boolean hasBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder(); - - // required string clientName = 2; - boolean hasClientName(); - String getClientName(); - } - public static final class UpdateBlockForPipelineRequestProto extends - com.google.protobuf.GeneratedMessage - implements UpdateBlockForPipelineRequestProtoOrBuilder { - // Use UpdateBlockForPipelineRequestProto.newBuilder() to construct. - private UpdateBlockForPipelineRequestProto(Builder builder) { - super(builder); - } - private UpdateBlockForPipelineRequestProto(boolean noInit) {} - - private static final UpdateBlockForPipelineRequestProto defaultInstance; - public static UpdateBlockForPipelineRequestProto getDefaultInstance() { - return defaultInstance; - } - - public UpdateBlockForPipelineRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_UpdateBlockForPipelineRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_UpdateBlockForPipelineRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required .ExtendedBlockProto block = 1; - public static final int BLOCK_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() { - return block_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() { - return block_; - } - - // required string clientName = 2; - public static final int CLIENTNAME_FIELD_NUMBER = 2; - private java.lang.Object clientName_; - public boolean hasClientName() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getClientName() { - java.lang.Object ref = clientName_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s 
= bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - clientName_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getClientNameBytes() { - java.lang.Object ref = clientName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - clientName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - clientName_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBlock()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasClientName()) { - memoizedIsInitialized = 0; - return false; - } - if (!getBlock().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, block_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getClientNameBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, block_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getClientNameBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto) obj; - - boolean result = true; - result = result && (hasBlock() == other.hasBlock()); - if (hasBlock()) { - result = result && getBlock() - .equals(other.getBlock()); - } - result = result && (hasClientName() == other.hasClientName()); - if (hasClientName()) { - result = result && getClientName() - .equals(other.getClientName()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBlock()) { - hash = (37 * hash) + BLOCK_FIELD_NUMBER; - hash = (53 * hash) + getBlock().hashCode(); - } - if (hasClientName()) { - hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER; - hash = (53 * hash) + getClientName().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static 
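The equals(...) and hashCode() just shown implement value semantics: two messages are equal only if the same fields are present with equal values and their unknown-field sets match, and the hash mixes each present field's number (x37) and value (x53) over the descriptor seed. The same pattern holds for every message in this file; a sketch using the single-field GetLinkTargetRequestProto to stay self-contained (class name illustrative):

    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto;

    public class ValueEqualitySketch {
      public static void main(String[] args) throws Exception {
        GetLinkTargetRequestProto a =
            GetLinkTargetRequestProto.newBuilder().setPath("/x").build();
        GetLinkTargetRequestProto b =
            GetLinkTargetRequestProto.parseFrom(a.toByteArray()); // independent copy

        System.out.println(a.equals(b) && a.hashCode() == b.hashCode()); // true
        System.out.println(a.equals(a.toBuilder().setPath("/y").build())); // false
      }
    }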
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder 
toBuilder() { return newBuilder(this); }

 [... deleted protoc-generated code elided: the remainder of the UpdateBlockForPipelineRequestProto builder (buildPartial/mergeFrom, the required block and clientName fields), the complete generated UpdateBlockForPipelineResponseProto message and builder (a single required LocatedBlockProto block field), and the first half of UpdatePipelineRequestProto (fields: required clientName, required oldBlock and newBlock ExtendedBlockProtos, repeated DatanodeIDProto newNodes; accessors, serialization, equals/hashCode, and parseFrom overloads). protoc regenerates all of this verbatim from the corresponding .proto sources. ...]
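 For context (not part of the patch): a minimal sketch of how client code drives the generated UpdatePipelineRequestProto builder API deleted above. The ExtendedBlockProto setters (setPoolId, setBlockId, setGenerationStamp) come from hdfs.proto; the class name and all literal values are invented for illustration.

    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;

    public class UpdatePipelineExample {
      public static void main(String[] args) throws Exception {
        // Required sub-message: every required field must be set, or build() throws
        // an UninitializedMessageException (see the generated isInitialized checks).
        ExtendedBlockProto oldBlock = ExtendedBlockProto.newBuilder()
            .setPoolId("BP-example")      // invented pool id
            .setBlockId(1073741825L)      // invented block id
            .setGenerationStamp(1001L)
            .build();

        UpdatePipelineRequestProto req = UpdatePipelineRequestProto.newBuilder()
            .setClientName("DFSClient_example")
            .setOldBlock(oldBlock)
            // New block: same block identity with a bumped generation stamp.
            .setNewBlock(oldBlock.toBuilder().setGenerationStamp(1002L).build())
            .build();                     // newNodes is repeated, so it may stay empty

        // Round-trip through the wire format using the generated parse helpers.
        byte[] wire = req.toByteArray();
        UpdatePipelineRequestProto parsed = UpdatePipelineRequestProto.parseFrom(wire);
        assert parsed.getClientName().equals("DFSClient_example");
      }
    }

 In the HDFS codebase these messages are built by the PB protocol translators rather than by hand; the sketch just exercises the generated API directly.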
 [... further deleted protoc-generated code elided: the remaining UpdatePipelineRequestProto parse helpers and its builder (single-field builders for oldBlock and newBlock, a RepeatedFieldBuilder for newNodes), followed by the generated UpdatePipelineResponseProto, an empty response message, up to its parseFrom helpers. ...]

 public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_UpdatePipelineResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_UpdatePipelineResponseProto_fieldAccessorTable; - } - - // Construct using 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - 
extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:UpdatePipelineResponseProto) - } - - static { - defaultInstance = new UpdatePipelineResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:UpdatePipelineResponseProto) - } - - public interface GetDelegationTokenRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string renewer = 1; - boolean hasRenewer(); - String getRenewer(); - } - public static final class GetDelegationTokenRequestProto extends - com.google.protobuf.GeneratedMessage - implements GetDelegationTokenRequestProtoOrBuilder { - // Use GetDelegationTokenRequestProto.newBuilder() to construct. - private GetDelegationTokenRequestProto(Builder builder) { - super(builder); - } - private GetDelegationTokenRequestProto(boolean noInit) {} - - private static final GetDelegationTokenRequestProto defaultInstance; - public static GetDelegationTokenRequestProto getDefaultInstance() { - return defaultInstance; - } - - public GetDelegationTokenRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetDelegationTokenRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetDelegationTokenRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required string renewer = 1; - public static final int RENEWER_FIELD_NUMBER = 1; - private java.lang.Object renewer_; - public boolean hasRenewer() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getRenewer() { - java.lang.Object ref = renewer_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - renewer_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getRenewerBytes() { - java.lang.Object ref = renewer_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - renewer_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - renewer_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasRenewer()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getRenewerBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getRenewerBytes()); - } - size += 
getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto) obj; - - boolean result = true; - result = result && (hasRenewer() == other.hasRenewer()); - if (hasRenewer()) { - result = result && getRenewer() - .equals(other.getRenewer()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasRenewer()) { - hash = (37 * hash) + RENEWER_FIELD_NUMBER; - hash = (53 * hash) + getRenewer().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetDelegationTokenRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetDelegationTokenRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - renewer_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto 
build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.renewer_ = renewer_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto.getDefaultInstance()) return this; - if (other.hasRenewer()) { - setRenewer(other.getRenewer()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasRenewer()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - renewer_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required string renewer = 1; - private java.lang.Object renewer_ = ""; - public boolean hasRenewer() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getRenewer() { - java.lang.Object ref = renewer_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - renewer_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setRenewer(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - renewer_ = value; - onChanged(); - 
return this; - } - public Builder clearRenewer() { - bitField0_ = (bitField0_ & ~0x00000001); - renewer_ = getDefaultInstance().getRenewer(); - onChanged(); - return this; - } - void setRenewer(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - renewer_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:GetDelegationTokenRequestProto) - } - - static { - defaultInstance = new GetDelegationTokenRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetDelegationTokenRequestProto) - } - - public interface GetDelegationTokenResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .BlockTokenIdentifierProto token = 1; - boolean hasToken(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getToken(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getTokenOrBuilder(); - } - public static final class GetDelegationTokenResponseProto extends - com.google.protobuf.GeneratedMessage - implements GetDelegationTokenResponseProtoOrBuilder { - // Use GetDelegationTokenResponseProto.newBuilder() to construct. - private GetDelegationTokenResponseProto(Builder builder) { - super(builder); - } - private GetDelegationTokenResponseProto(boolean noInit) {} - - private static final GetDelegationTokenResponseProto defaultInstance; - public static GetDelegationTokenResponseProto getDefaultInstance() { - return defaultInstance; - } - - public GetDelegationTokenResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetDelegationTokenResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetDelegationTokenResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required .BlockTokenIdentifierProto token = 1; - public static final int TOKEN_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto token_; - public boolean hasToken() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getToken() { - return token_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getTokenOrBuilder() { - return token_; - } - - private void initFields() { - token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasToken()) { - memoizedIsInitialized = 0; - return false; - } - if (!getToken().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, token_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = 
memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, token_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto) obj; - - boolean result = true; - result = result && (hasToken() == other.hasToken()); - if (hasToken()) { - result = result && getToken() - .equals(other.getToken()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasToken()) { - hash = (37 * hash) + TOKEN_FIELD_NUMBER; - hash = (53 * hash) + getToken().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto parseDelimitedFrom(java.io.InputStream 
input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetDelegationTokenResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_GetDelegationTokenResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getTokenFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (tokenBuilder_ == null) { - token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - } else { - tokenBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (tokenBuilder_ == null) { - result.token_ = token_; - } else { - result.token_ = tokenBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto.getDefaultInstance()) return this; - if (other.hasToken()) { - mergeToken(other.getToken()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasToken()) { - - return false; - } - if (!getToken().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.newBuilder(); - if (hasToken()) { - subBuilder.mergeFrom(getToken()); - } - input.readMessage(subBuilder, extensionRegistry); - setToken(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .BlockTokenIdentifierProto token = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder> tokenBuilder_; - public boolean hasToken() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getToken() { - if (tokenBuilder_ == null) { - return token_; - } else { - return tokenBuilder_.getMessage(); - } - } - public Builder setToken(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto value) { - if (tokenBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - token_ = value; - onChanged(); - } else { - tokenBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setToken( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder builderForValue) { - if (tokenBuilder_ == null) { - token_ = builderForValue.build(); - onChanged(); - } else { - tokenBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeToken(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto value) { - if (tokenBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - token_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance()) { - token_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.newBuilder(token_).mergeFrom(value).buildPartial(); - } else { - token_ = value; - } - onChanged(); - } else { - tokenBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearToken() { - if (tokenBuilder_ == null) { - token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - onChanged(); - } else { - tokenBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder getTokenBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getTokenFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getTokenOrBuilder() { - if (tokenBuilder_ != null) { - return tokenBuilder_.getMessageOrBuilder(); - } else { - return token_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder> - getTokenFieldBuilder() { - if (tokenBuilder_ == 
null) { - tokenBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder>( - token_, - getParentForChildren(), - isClean()); - token_ = null; - } - return tokenBuilder_; - } - - // @@protoc_insertion_point(builder_scope:GetDelegationTokenResponseProto) - } - - static { - defaultInstance = new GetDelegationTokenResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetDelegationTokenResponseProto) - } - - public interface RenewDelegationTokenRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .BlockTokenIdentifierProto token = 1; - boolean hasToken(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getToken(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getTokenOrBuilder(); - } - public static final class RenewDelegationTokenRequestProto extends - com.google.protobuf.GeneratedMessage - implements RenewDelegationTokenRequestProtoOrBuilder { - // Use RenewDelegationTokenRequestProto.newBuilder() to construct. - private RenewDelegationTokenRequestProto(Builder builder) { - super(builder); - } - private RenewDelegationTokenRequestProto(boolean noInit) {} - - private static final RenewDelegationTokenRequestProto defaultInstance; - public static RenewDelegationTokenRequestProto getDefaultInstance() { - return defaultInstance; - } - - public RenewDelegationTokenRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RenewDelegationTokenRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RenewDelegationTokenRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required .BlockTokenIdentifierProto token = 1; - public static final int TOKEN_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto token_; - public boolean hasToken() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getToken() { - return token_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getTokenOrBuilder() { - return token_; - } - - private void initFields() { - token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasToken()) { - memoizedIsInitialized = 0; - return false; - } - if (!getToken().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, token_); - } - 
getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, token_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto) obj; - - boolean result = true; - result = result && (hasToken() == other.hasToken()); - if (hasToken()) { - result = result && getToken() - .equals(other.getToken()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasToken()) { - hash = (37 * hash) + TOKEN_FIELD_NUMBER; - hash = (53 * hash) + getToken().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RenewDelegationTokenRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RenewDelegationTokenRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getTokenFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (tokenBuilder_ == null) { - token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - } else { - tokenBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public 
com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (tokenBuilder_ == null) { - result.token_ = token_; - } else { - result.token_ = tokenBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto.getDefaultInstance()) return this; - if (other.hasToken()) { - mergeToken(other.getToken()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasToken()) { - - return false; - } - if (!getToken().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - 
onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.newBuilder(); - if (hasToken()) { - subBuilder.mergeFrom(getToken()); - } - input.readMessage(subBuilder, extensionRegistry); - setToken(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .BlockTokenIdentifierProto token = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder> tokenBuilder_; - public boolean hasToken() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getToken() { - if (tokenBuilder_ == null) { - return token_; - } else { - return tokenBuilder_.getMessage(); - } - } - public Builder setToken(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto value) { - if (tokenBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - token_ = value; - onChanged(); - } else { - tokenBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setToken( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder builderForValue) { - if (tokenBuilder_ == null) { - token_ = builderForValue.build(); - onChanged(); - } else { - tokenBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeToken(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto value) { - if (tokenBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - token_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance()) { - token_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.newBuilder(token_).mergeFrom(value).buildPartial(); - } else { - token_ = value; - } - onChanged(); - } else { - tokenBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearToken() { - if (tokenBuilder_ == null) { - token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - onChanged(); - } else { - tokenBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder getTokenBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getTokenFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getTokenOrBuilder() { - if (tokenBuilder_ != null) { - return tokenBuilder_.getMessageOrBuilder(); - } else { - return token_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder> - getTokenFieldBuilder() { - if (tokenBuilder_ == null) { - tokenBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder>( - token_, - getParentForChildren(), - isClean()); - token_ = null; - } - return tokenBuilder_; - } - - // @@protoc_insertion_point(builder_scope:RenewDelegationTokenRequestProto) - } - - static { - defaultInstance = new RenewDelegationTokenRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:RenewDelegationTokenRequestProto) - } - - public interface RenewDelegationTokenResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required uint64 newExireTime = 1; - boolean hasNewExireTime(); - long getNewExireTime(); - } - public static final class RenewDelegationTokenResponseProto extends - com.google.protobuf.GeneratedMessage - implements RenewDelegationTokenResponseProtoOrBuilder { - // Use RenewDelegationTokenResponseProto.newBuilder() to construct. - private RenewDelegationTokenResponseProto(Builder builder) { - super(builder); - } - private RenewDelegationTokenResponseProto(boolean noInit) {} - - private static final RenewDelegationTokenResponseProto defaultInstance; - public static RenewDelegationTokenResponseProto getDefaultInstance() { - return defaultInstance; - } - - public RenewDelegationTokenResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RenewDelegationTokenResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RenewDelegationTokenResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required uint64 newExireTime = 1; - public static final int NEWEXIRETIME_FIELD_NUMBER = 1; - private long newExireTime_; - public boolean hasNewExireTime() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getNewExireTime() { - return newExireTime_; - } - - private void initFields() { - newExireTime_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasNewExireTime()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt64(1, newExireTime_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(1, newExireTime_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - 
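[Editor's note: the delegation-token messages deleted above follow the stock protobuf-java 2.x generated pattern: a builder with per-field presence bits in bitField0_, plus parseDelimitedFrom()/mergeDelimitedFrom() for length-prefixed framing. A minimal sketch of that delimited round trip, assuming the pre-deletion generated classes are still on the classpath; the demo class name and the sample timestamp are illustrative, and writeDelimitedTo() comes from the standard protobuf MessageLite base class rather than this excerpt. Note that newExireTime (sic) is the literal field name in the .proto source, faithfully preserved by the generator.]

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;

    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto;

    public class RenewResponseRoundTrip {
      public static void main(String[] args) throws Exception {
        // newExireTime is a required field, so it must be set before build() succeeds.
        RenewDelegationTokenResponseProto resp = RenewDelegationTokenResponseProto.newBuilder()
            .setNewExireTime(1323456789000L)
            .build();

        // Delimited framing: writeDelimitedTo() prefixes the message with its varint
        // length, and parseDelimitedFrom() (seen above) reads back exactly one frame.
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        resp.writeDelimitedTo(out);

        RenewDelegationTokenResponseProto parsed = RenewDelegationTokenResponseProto
            .parseDelimitedFrom(new ByteArrayInputStream(out.toByteArray()));
        assert parsed.hasNewExireTime()
            && parsed.getNewExireTime() == resp.getNewExireTime();
      }
    }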
private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto) obj; - - boolean result = true; - result = result && (hasNewExireTime() == other.hasNewExireTime()); - if (hasNewExireTime()) { - result = result && (getNewExireTime() - == other.getNewExireTime()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasNewExireTime()) { - hash = (37 * hash) + NEWEXIRETIME_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getNewExireTime()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RenewDelegationTokenResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_RenewDelegationTokenResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - newExireTime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto.getDefaultInstance(); - } - - public 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.newExireTime_ = newExireTime_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto.getDefaultInstance()) return this; - if (other.hasNewExireTime()) { - setNewExireTime(other.getNewExireTime()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasNewExireTime()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - newExireTime_ = input.readUInt64(); - break; - } - } - } - } - - private int bitField0_; - - // required uint64 newExireTime = 1; - private long newExireTime_ ; - public boolean hasNewExireTime() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getNewExireTime() { - return newExireTime_; - } - public Builder setNewExireTime(long value) { - bitField0_ |= 0x00000001; - newExireTime_ = value; - onChanged(); - return this; - } - public Builder clearNewExireTime() { - bitField0_ = 
(bitField0_ & ~0x00000001); - newExireTime_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:RenewDelegationTokenResponseProto) - } - - static { - defaultInstance = new RenewDelegationTokenResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:RenewDelegationTokenResponseProto) - } - - public interface CancelDelegationTokenRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .BlockTokenIdentifierProto token = 1; - boolean hasToken(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getToken(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getTokenOrBuilder(); - } - public static final class CancelDelegationTokenRequestProto extends - com.google.protobuf.GeneratedMessage - implements CancelDelegationTokenRequestProtoOrBuilder { - // Use CancelDelegationTokenRequestProto.newBuilder() to construct. - private CancelDelegationTokenRequestProto(Builder builder) { - super(builder); - } - private CancelDelegationTokenRequestProto(boolean noInit) {} - - private static final CancelDelegationTokenRequestProto defaultInstance; - public static CancelDelegationTokenRequestProto getDefaultInstance() { - return defaultInstance; - } - - public CancelDelegationTokenRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_CancelDelegationTokenRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_CancelDelegationTokenRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required .BlockTokenIdentifierProto token = 1; - public static final int TOKEN_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto token_; - public boolean hasToken() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getToken() { - return token_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getTokenOrBuilder() { - return token_; - } - - private void initFields() { - token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasToken()) { - memoizedIsInitialized = 0; - return false; - } - if (!getToken().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, token_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, 
token_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto) obj; - - boolean result = true; - result = result && (hasToken() == other.hasToken()); - if (hasToken()) { - result = result && getToken() - .equals(other.getToken()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasToken()) { - hash = (37 * hash) + TOKEN_FIELD_NUMBER; - hash = (53 * hash) + getToken().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } 
- } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_CancelDelegationTokenRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_CancelDelegationTokenRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getTokenFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (tokenBuilder_ == null) { - token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - } else { - tokenBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto getDefaultInstanceForType() { - 
return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (tokenBuilder_ == null) { - result.token_ = token_; - } else { - result.token_ = tokenBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto.getDefaultInstance()) return this; - if (other.hasToken()) { - mergeToken(other.getToken()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasToken()) { - - return false; - } - if (!getToken().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.newBuilder(); - if (hasToken()) { - subBuilder.mergeFrom(getToken()); - } - input.readMessage(subBuilder, 
extensionRegistry); - setToken(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .BlockTokenIdentifierProto token = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder> tokenBuilder_; - public boolean hasToken() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getToken() { - if (tokenBuilder_ == null) { - return token_; - } else { - return tokenBuilder_.getMessage(); - } - } - public Builder setToken(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto value) { - if (tokenBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - token_ = value; - onChanged(); - } else { - tokenBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setToken( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder builderForValue) { - if (tokenBuilder_ == null) { - token_ = builderForValue.build(); - onChanged(); - } else { - tokenBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeToken(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto value) { - if (tokenBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - token_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance()) { - token_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.newBuilder(token_).mergeFrom(value).buildPartial(); - } else { - token_ = value; - } - onChanged(); - } else { - tokenBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearToken() { - if (tokenBuilder_ == null) { - token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - onChanged(); - } else { - tokenBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder getTokenBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getTokenFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getTokenOrBuilder() { - if (tokenBuilder_ != null) { - return tokenBuilder_.getMessageOrBuilder(); - } else { - return token_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder> - getTokenFieldBuilder() { - if (tokenBuilder_ == null) { - tokenBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder>( - token_, - getParentForChildren(), - isClean()); - token_ = null; - } - return tokenBuilder_; - } - - // @@protoc_insertion_point(builder_scope:CancelDelegationTokenRequestProto) - } - - static { - defaultInstance = new CancelDelegationTokenRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:CancelDelegationTokenRequestProto) - } - - public interface CancelDelegationTokenResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class CancelDelegationTokenResponseProto extends - com.google.protobuf.GeneratedMessage - implements CancelDelegationTokenResponseProtoOrBuilder { - // Use CancelDelegationTokenResponseProto.newBuilder() to construct. - private CancelDelegationTokenResponseProto(Builder builder) { - super(builder); - } - private CancelDelegationTokenResponseProto(boolean noInit) {} - - private static final CancelDelegationTokenResponseProto defaultInstance; - public static CancelDelegationTokenResponseProto getDefaultInstance() { - return defaultInstance; - } - - public CancelDelegationTokenResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_CancelDelegationTokenResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_CancelDelegationTokenResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder 
toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_CancelDelegationTokenResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_CancelDelegationTokenResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto) { - return 
mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:CancelDelegationTokenResponseProto) - } - - static { - defaultInstance = new CancelDelegationTokenResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:CancelDelegationTokenResponseProto) - } - - public interface SetBalancerBandwidthRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required int64 bandwidth = 1; - boolean hasBandwidth(); - long getBandwidth(); - } - public static final class SetBalancerBandwidthRequestProto extends - com.google.protobuf.GeneratedMessage - implements SetBalancerBandwidthRequestProtoOrBuilder { - // Use SetBalancerBandwidthRequestProto.newBuilder() to construct. 
- private SetBalancerBandwidthRequestProto(Builder builder) { - super(builder); - } - private SetBalancerBandwidthRequestProto(boolean noInit) {} - - private static final SetBalancerBandwidthRequestProto defaultInstance; - public static SetBalancerBandwidthRequestProto getDefaultInstance() { - return defaultInstance; - } - - public SetBalancerBandwidthRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetBalancerBandwidthRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetBalancerBandwidthRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required int64 bandwidth = 1; - public static final int BANDWIDTH_FIELD_NUMBER = 1; - private long bandwidth_; - public boolean hasBandwidth() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getBandwidth() { - return bandwidth_; - } - - private void initFields() { - bandwidth_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBandwidth()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeInt64(1, bandwidth_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(1, bandwidth_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto) obj; - - boolean result = true; - result = result && (hasBandwidth() == other.hasBandwidth()); - if (hasBandwidth()) { - result = result && (getBandwidth() - == other.getBandwidth()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBandwidth()) { - hash = (37 * hash) + BANDWIDTH_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getBandwidth()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return 
newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetBalancerBandwidthRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetBalancerBandwidthRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - bandwidth_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.bandwidth_ = bandwidth_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder 
mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto.getDefaultInstance()) return this; - if (other.hasBandwidth()) { - setBandwidth(other.getBandwidth()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBandwidth()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - bandwidth_ = input.readInt64(); - break; - } - } - } - } - - private int bitField0_; - - // required int64 bandwidth = 1; - private long bandwidth_ ; - public boolean hasBandwidth() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getBandwidth() { - return bandwidth_; - } - public Builder setBandwidth(long value) { - bitField0_ |= 0x00000001; - bandwidth_ = value; - onChanged(); - return this; - } - public Builder clearBandwidth() { - bitField0_ = (bitField0_ & ~0x00000001); - bandwidth_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:SetBalancerBandwidthRequestProto) - } - - static { - defaultInstance = new SetBalancerBandwidthRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:SetBalancerBandwidthRequestProto) - } - - public interface SetBalancerBandwidthResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class SetBalancerBandwidthResponseProto extends - com.google.protobuf.GeneratedMessage - implements SetBalancerBandwidthResponseProtoOrBuilder { - // Use SetBalancerBandwidthResponseProto.newBuilder() to construct. 
- private SetBalancerBandwidthResponseProto(Builder builder) { - super(builder); - } - private SetBalancerBandwidthResponseProto(boolean noInit) {} - - private static final SetBalancerBandwidthResponseProto defaultInstance; - public static SetBalancerBandwidthResponseProto getDefaultInstance() { - return defaultInstance; - } - - public SetBalancerBandwidthResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetBalancerBandwidthResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetBalancerBandwidthResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - 
public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetBalancerBandwidthResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.internal_static_SetBalancerBandwidthResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - 
com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:SetBalancerBandwidthResponseProto) - } - - static { - defaultInstance = new SetBalancerBandwidthResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:SetBalancerBandwidthResponseProto) - } - - public static abstract class ClientNamenodeProtocol - implements com.google.protobuf.Service { - protected ClientNamenodeProtocol() {} - - public interface Interface { - public abstract void getBlockLocations( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getServerDefaults( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void create( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void append( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void setReplication( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void setPermission( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void setOwner( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void abandonBlock( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void addBlock( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getAdditionalDatanode( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void complete( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void reportBadBlocks( - com.google.protobuf.RpcController controller, - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void concat( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void rename( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void rename2( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void delete( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void mkdirs( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getListing( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void renewLease( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void recoverLease( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getFsStats( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getDatanodeReport( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getPreferredBlockSize( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void setSafeMode( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void saveNamespace( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void restoreFailedStorage( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void refreshNodes( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto request, - 
com.google.protobuf.RpcCallback done); - - public abstract void finalizeUpgrade( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void distributedUpgradeProgress( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void listCorruptFileBlocks( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void metaSave( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getFileInfo( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getFileLinkInfo( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getContentSummary( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void setQuota( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void fsync( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void setTimes( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void createSymlink( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getLinkTarget( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void updateBlockForPipeline( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void updatePipeline( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getDelegationToken( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto request, - 
com.google.protobuf.RpcCallback done); - - public abstract void renewDelegationToken( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void cancelDelegationToken( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void setBalancerBandwidth( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto request, - com.google.protobuf.RpcCallback done); - - } - - public static com.google.protobuf.Service newReflectiveService( - final Interface impl) { - return new ClientNamenodeProtocol() { - @java.lang.Override - public void getBlockLocations( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.getBlockLocations(controller, request, done); - } - - @java.lang.Override - public void getServerDefaults( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.getServerDefaults(controller, request, done); - } - - @java.lang.Override - public void create( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.create(controller, request, done); - } - - @java.lang.Override - public void append( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.append(controller, request, done); - } - - @java.lang.Override - public void setReplication( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.setReplication(controller, request, done); - } - - @java.lang.Override - public void setPermission( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.setPermission(controller, request, done); - } - - @java.lang.Override - public void setOwner( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.setOwner(controller, request, done); - } - - @java.lang.Override - public void abandonBlock( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.abandonBlock(controller, request, done); - } - - @java.lang.Override - public void addBlock( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.addBlock(controller, 
request, done); - } - - @java.lang.Override - public void getAdditionalDatanode( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.getAdditionalDatanode(controller, request, done); - } - - @java.lang.Override - public void complete( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.complete(controller, request, done); - } - - @java.lang.Override - public void reportBadBlocks( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.reportBadBlocks(controller, request, done); - } - - @java.lang.Override - public void concat( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.concat(controller, request, done); - } - - @java.lang.Override - public void rename( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.rename(controller, request, done); - } - - @java.lang.Override - public void rename2( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto request, - com.google.protobuf.RpcCallback done) { - impl.rename2(controller, request, done); - } - - @java.lang.Override - public void delete( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.delete(controller, request, done); - } - - @java.lang.Override - public void mkdirs( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.mkdirs(controller, request, done); - } - - @java.lang.Override - public void getListing( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.getListing(controller, request, done); - } - - @java.lang.Override - public void renewLease( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.renewLease(controller, request, done); - } - - @java.lang.Override - public void recoverLease( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.recoverLease(controller, request, done); - } - - @java.lang.Override - public void getFsStats( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.getFsStats(controller, request, done); - } - - @java.lang.Override - public void 
getDatanodeReport( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.getDatanodeReport(controller, request, done); - } - - @java.lang.Override - public void getPreferredBlockSize( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.getPreferredBlockSize(controller, request, done); - } - - @java.lang.Override - public void setSafeMode( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.setSafeMode(controller, request, done); - } - - @java.lang.Override - public void saveNamespace( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.saveNamespace(controller, request, done); - } - - @java.lang.Override - public void restoreFailedStorage( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.restoreFailedStorage(controller, request, done); - } - - @java.lang.Override - public void refreshNodes( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.refreshNodes(controller, request, done); - } - - @java.lang.Override - public void finalizeUpgrade( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.finalizeUpgrade(controller, request, done); - } - - @java.lang.Override - public void distributedUpgradeProgress( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.distributedUpgradeProgress(controller, request, done); - } - - @java.lang.Override - public void listCorruptFileBlocks( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.listCorruptFileBlocks(controller, request, done); - } - - @java.lang.Override - public void metaSave( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.metaSave(controller, request, done); - } - - @java.lang.Override - public void getFileInfo( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.getFileInfo(controller, request, done); - } - - @java.lang.Override - public void getFileLinkInfo( - com.google.protobuf.RpcController controller, - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.getFileLinkInfo(controller, request, done); - } - - @java.lang.Override - public void getContentSummary( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.getContentSummary(controller, request, done); - } - - @java.lang.Override - public void setQuota( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.setQuota(controller, request, done); - } - - @java.lang.Override - public void fsync( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.fsync(controller, request, done); - } - - @java.lang.Override - public void setTimes( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.setTimes(controller, request, done); - } - - @java.lang.Override - public void createSymlink( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.createSymlink(controller, request, done); - } - - @java.lang.Override - public void getLinkTarget( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.getLinkTarget(controller, request, done); - } - - @java.lang.Override - public void updateBlockForPipeline( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.updateBlockForPipeline(controller, request, done); - } - - @java.lang.Override - public void updatePipeline( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.updatePipeline(controller, request, done); - } - - @java.lang.Override - public void getDelegationToken( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.getDelegationToken(controller, request, done); - } - - @java.lang.Override - public void renewDelegationToken( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.renewDelegationToken(controller, request, done); - } - - @java.lang.Override - public void cancelDelegationToken( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.cancelDelegationToken(controller, request, done); - } - - 
@java.lang.Override - public void setBalancerBandwidth( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.setBalancerBandwidth(controller, request, done); - } - - }; - } - - public static com.google.protobuf.BlockingService - newReflectiveBlockingService(final BlockingInterface impl) { - return new com.google.protobuf.BlockingService() { - public final com.google.protobuf.Descriptors.ServiceDescriptor - getDescriptorForType() { - return getDescriptor(); - } - - public final com.google.protobuf.Message callBlockingMethod( - com.google.protobuf.Descriptors.MethodDescriptor method, - com.google.protobuf.RpcController controller, - com.google.protobuf.Message request) - throws com.google.protobuf.ServiceException { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.callBlockingMethod() given method descriptor for " + - "wrong service type."); - } - switch(method.getIndex()) { - case 0: - return impl.getBlockLocations(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto)request); - case 1: - return impl.getServerDefaults(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto)request); - case 2: - return impl.create(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto)request); - case 3: - return impl.append(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto)request); - case 4: - return impl.setReplication(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto)request); - case 5: - return impl.setPermission(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto)request); - case 6: - return impl.setOwner(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto)request); - case 7: - return impl.abandonBlock(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto)request); - case 8: - return impl.addBlock(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto)request); - case 9: - return impl.getAdditionalDatanode(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto)request); - case 10: - return impl.complete(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto)request); - case 11: - return impl.reportBadBlocks(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto)request); - case 12: - return impl.concat(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto)request); - case 13: - return impl.rename(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto)request); - case 14: - return impl.rename2(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto)request); - case 15: - return impl.delete(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto)request); - case 16: - return impl.mkdirs(controller, 
(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto)request); - case 17: - return impl.getListing(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto)request); - case 18: - return impl.renewLease(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto)request); - case 19: - return impl.recoverLease(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto)request); - case 20: - return impl.getFsStats(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto)request); - case 21: - return impl.getDatanodeReport(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto)request); - case 22: - return impl.getPreferredBlockSize(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto)request); - case 23: - return impl.setSafeMode(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto)request); - case 24: - return impl.saveNamespace(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto)request); - case 25: - return impl.restoreFailedStorage(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto)request); - case 26: - return impl.refreshNodes(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto)request); - case 27: - return impl.finalizeUpgrade(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto)request); - case 28: - return impl.distributedUpgradeProgress(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto)request); - case 29: - return impl.listCorruptFileBlocks(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto)request); - case 30: - return impl.metaSave(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto)request); - case 31: - return impl.getFileInfo(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto)request); - case 32: - return impl.getFileLinkInfo(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto)request); - case 33: - return impl.getContentSummary(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto)request); - case 34: - return impl.setQuota(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto)request); - case 35: - return impl.fsync(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto)request); - case 36: - return impl.setTimes(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto)request); - case 37: - return impl.createSymlink(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto)request); - case 38: - return impl.getLinkTarget(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto)request); - case 39: - return 
impl.updateBlockForPipeline(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto)request); - case 40: - return impl.updatePipeline(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto)request); - case 41: - return impl.getDelegationToken(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto)request); - case 42: - return impl.renewDelegationToken(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto)request); - case 43: - return impl.cancelDelegationToken(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto)request); - case 44: - return impl.setBalancerBandwidth(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto)request); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public final com.google.protobuf.Message - getRequestPrototype( - com.google.protobuf.Descriptors.MethodDescriptor method) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.getRequestPrototype() given method " + - "descriptor for wrong service type."); - } - switch(method.getIndex()) { - case 0: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto.getDefaultInstance(); - case 1: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto.getDefaultInstance(); - case 2: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto.getDefaultInstance(); - case 3: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto.getDefaultInstance(); - case 4: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto.getDefaultInstance(); - case 5: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto.getDefaultInstance(); - case 6: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto.getDefaultInstance(); - case 7: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto.getDefaultInstance(); - case 8: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto.getDefaultInstance(); - case 9: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto.getDefaultInstance(); - case 10: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto.getDefaultInstance(); - case 11: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto.getDefaultInstance(); - case 12: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto.getDefaultInstance(); - case 13: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto.getDefaultInstance(); - case 14: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto.getDefaultInstance(); - case 15: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto.getDefaultInstance(); - case 16: - return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto.getDefaultInstance(); - case 17: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto.getDefaultInstance(); - case 18: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto.getDefaultInstance(); - case 19: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto.getDefaultInstance(); - case 20: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto.getDefaultInstance(); - case 21: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto.getDefaultInstance(); - case 22: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto.getDefaultInstance(); - case 23: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto.getDefaultInstance(); - case 24: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto.getDefaultInstance(); - case 25: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto.getDefaultInstance(); - case 26: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto.getDefaultInstance(); - case 27: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto.getDefaultInstance(); - case 28: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto.getDefaultInstance(); - case 29: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto.getDefaultInstance(); - case 30: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto.getDefaultInstance(); - case 31: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto.getDefaultInstance(); - case 32: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto.getDefaultInstance(); - case 33: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto.getDefaultInstance(); - case 34: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto.getDefaultInstance(); - case 35: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto.getDefaultInstance(); - case 36: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto.getDefaultInstance(); - case 37: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto.getDefaultInstance(); - case 38: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto.getDefaultInstance(); - case 39: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto.getDefaultInstance(); - case 40: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto.getDefaultInstance(); - case 41: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto.getDefaultInstance(); - case 42: - return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto.getDefaultInstance(); - case 43: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto.getDefaultInstance(); - case 44: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto.getDefaultInstance(); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public final com.google.protobuf.Message - getResponsePrototype( - com.google.protobuf.Descriptors.MethodDescriptor method) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.getResponsePrototype() given method " + - "descriptor for wrong service type."); - } - switch(method.getIndex()) { - case 0: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.getDefaultInstance(); - case 1: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.getDefaultInstance(); - case 2: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.getDefaultInstance(); - case 3: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.getDefaultInstance(); - case 4: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.getDefaultInstance(); - case 5: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.getDefaultInstance(); - case 6: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.getDefaultInstance(); - case 7: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto.getDefaultInstance(); - case 8: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto.getDefaultInstance(); - case 9: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto.getDefaultInstance(); - case 10: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto.getDefaultInstance(); - case 11: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance(); - case 12: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.getDefaultInstance(); - case 13: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.getDefaultInstance(); - case 14: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.getDefaultInstance(); - case 15: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.getDefaultInstance(); - case 16: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.getDefaultInstance(); - case 17: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.getDefaultInstance(); - case 18: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.getDefaultInstance(); - case 19: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.getDefaultInstance(); - case 20: - return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.getDefaultInstance(); - case 21: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.getDefaultInstance(); - case 22: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.getDefaultInstance(); - case 23: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.getDefaultInstance(); - case 24: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.getDefaultInstance(); - case 25: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.getDefaultInstance(); - case 26: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.getDefaultInstance(); - case 27: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.getDefaultInstance(); - case 28: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto.getDefaultInstance(); - case 29: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.getDefaultInstance(); - case 30: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.getDefaultInstance(); - case 31: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.getDefaultInstance(); - case 32: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.getDefaultInstance(); - case 33: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.getDefaultInstance(); - case 34: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.getDefaultInstance(); - case 35: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.getDefaultInstance(); - case 36: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.getDefaultInstance(); - case 37: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.getDefaultInstance(); - case 38: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.getDefaultInstance(); - case 39: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto.getDefaultInstance(); - case 40: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.getDefaultInstance(); - case 41: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto.getDefaultInstance(); - case 42: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto.getDefaultInstance(); - case 43: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto.getDefaultInstance(); - case 44: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.getDefaultInstance(); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - }; - } - - public abstract void 
getBlockLocations( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getServerDefaults( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void create( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void append( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void setReplication( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void setPermission( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void setOwner( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void abandonBlock( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void addBlock( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getAdditionalDatanode( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void complete( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void reportBadBlocks( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void concat( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void rename( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void rename2( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void delete( - com.google.protobuf.RpcController controller, - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void mkdirs( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getListing( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void renewLease( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void recoverLease( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getFsStats( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getDatanodeReport( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getPreferredBlockSize( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void setSafeMode( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void saveNamespace( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void restoreFailedStorage( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void refreshNodes( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void finalizeUpgrade( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void distributedUpgradeProgress( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void listCorruptFileBlocks( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void metaSave( - com.google.protobuf.RpcController controller, - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getFileInfo( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getFileLinkInfo( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getContentSummary( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void setQuota( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void fsync( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void setTimes( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void createSymlink( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getLinkTarget( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void updateBlockForPipeline( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void updatePipeline( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getDelegationToken( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void renewDelegationToken( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void cancelDelegationToken( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void setBalancerBandwidth( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto request, - com.google.protobuf.RpcCallback done); - - public static final - com.google.protobuf.Descriptors.ServiceDescriptor - getDescriptor() { - return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.getDescriptor().getServices().get(0); - } - public final com.google.protobuf.Descriptors.ServiceDescriptor - getDescriptorForType() { - return getDescriptor(); - } - - public final void callMethod( - com.google.protobuf.Descriptors.MethodDescriptor method, - com.google.protobuf.RpcController controller, - com.google.protobuf.Message request, - com.google.protobuf.RpcCallback< - com.google.protobuf.Message> done) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.callMethod() given method descriptor for wrong " + - "service type."); - } - switch(method.getIndex()) { - case 0: - this.getBlockLocations(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 1: - this.getServerDefaults(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 2: - this.create(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 3: - this.append(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 4: - this.setReplication(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 5: - this.setPermission(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 6: - this.setOwner(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 7: - this.abandonBlock(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 8: - this.addBlock(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 9: - this.getAdditionalDatanode(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 10: - this.complete(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 11: - this.reportBadBlocks(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 12: - this.concat(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 13: - this.rename(controller, 
(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 14: - this.rename2(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 15: - this.delete(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 16: - this.mkdirs(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 17: - this.getListing(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 18: - this.renewLease(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 19: - this.recoverLease(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 20: - this.getFsStats(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 21: - this.getDatanodeReport(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 22: - this.getPreferredBlockSize(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 23: - this.setSafeMode(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 24: - this.saveNamespace(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 25: - this.restoreFailedStorage(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 26: - this.refreshNodes(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 27: - this.finalizeUpgrade(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 28: - this.distributedUpgradeProgress(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 29: - this.listCorruptFileBlocks(controller, 
(org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 30: - this.metaSave(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 31: - this.getFileInfo(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 32: - this.getFileLinkInfo(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 33: - this.getContentSummary(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 34: - this.setQuota(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 35: - this.fsync(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 36: - this.setTimes(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 37: - this.createSymlink(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 38: - this.getLinkTarget(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 39: - this.updateBlockForPipeline(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 40: - this.updatePipeline(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 41: - this.getDelegationToken(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 42: - this.renewDelegationToken(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 43: - this.cancelDelegationToken(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 44: - this.setBalancerBandwidth(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - 
public final com.google.protobuf.Message - getRequestPrototype( - com.google.protobuf.Descriptors.MethodDescriptor method) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.getRequestPrototype() given method " + - "descriptor for wrong service type."); - } - switch(method.getIndex()) { - case 0: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto.getDefaultInstance(); - case 1: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto.getDefaultInstance(); - case 2: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto.getDefaultInstance(); - case 3: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto.getDefaultInstance(); - case 4: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto.getDefaultInstance(); - case 5: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto.getDefaultInstance(); - case 6: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto.getDefaultInstance(); - case 7: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto.getDefaultInstance(); - case 8: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto.getDefaultInstance(); - case 9: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto.getDefaultInstance(); - case 10: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto.getDefaultInstance(); - case 11: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto.getDefaultInstance(); - case 12: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto.getDefaultInstance(); - case 13: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto.getDefaultInstance(); - case 14: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto.getDefaultInstance(); - case 15: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto.getDefaultInstance(); - case 16: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto.getDefaultInstance(); - case 17: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto.getDefaultInstance(); - case 18: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto.getDefaultInstance(); - case 19: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto.getDefaultInstance(); - case 20: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto.getDefaultInstance(); - case 21: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto.getDefaultInstance(); - case 22: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto.getDefaultInstance(); - case 23: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto.getDefaultInstance(); - case 24: - return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto.getDefaultInstance(); - case 25: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto.getDefaultInstance(); - case 26: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto.getDefaultInstance(); - case 27: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto.getDefaultInstance(); - case 28: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto.getDefaultInstance(); - case 29: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto.getDefaultInstance(); - case 30: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto.getDefaultInstance(); - case 31: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto.getDefaultInstance(); - case 32: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto.getDefaultInstance(); - case 33: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto.getDefaultInstance(); - case 34: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto.getDefaultInstance(); - case 35: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto.getDefaultInstance(); - case 36: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto.getDefaultInstance(); - case 37: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto.getDefaultInstance(); - case 38: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto.getDefaultInstance(); - case 39: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto.getDefaultInstance(); - case 40: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto.getDefaultInstance(); - case 41: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto.getDefaultInstance(); - case 42: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto.getDefaultInstance(); - case 43: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto.getDefaultInstance(); - case 44: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto.getDefaultInstance(); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public final com.google.protobuf.Message - getResponsePrototype( - com.google.protobuf.Descriptors.MethodDescriptor method) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.getResponsePrototype() given method " + - "descriptor for wrong service type."); - } - switch(method.getIndex()) { - case 0: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.getDefaultInstance(); - case 1: - return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.getDefaultInstance(); - case 2: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.getDefaultInstance(); - case 3: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.getDefaultInstance(); - case 4: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.getDefaultInstance(); - case 5: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.getDefaultInstance(); - case 6: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.getDefaultInstance(); - case 7: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto.getDefaultInstance(); - case 8: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto.getDefaultInstance(); - case 9: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto.getDefaultInstance(); - case 10: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto.getDefaultInstance(); - case 11: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance(); - case 12: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.getDefaultInstance(); - case 13: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.getDefaultInstance(); - case 14: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.getDefaultInstance(); - case 15: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.getDefaultInstance(); - case 16: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.getDefaultInstance(); - case 17: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.getDefaultInstance(); - case 18: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.getDefaultInstance(); - case 19: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.getDefaultInstance(); - case 20: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.getDefaultInstance(); - case 21: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.getDefaultInstance(); - case 22: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.getDefaultInstance(); - case 23: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.getDefaultInstance(); - case 24: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.getDefaultInstance(); - case 25: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.getDefaultInstance(); - case 26: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.getDefaultInstance(); - case 27: - return 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.getDefaultInstance(); - case 28: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto.getDefaultInstance(); - case 29: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.getDefaultInstance(); - case 30: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.getDefaultInstance(); - case 31: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.getDefaultInstance(); - case 32: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.getDefaultInstance(); - case 33: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.getDefaultInstance(); - case 34: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.getDefaultInstance(); - case 35: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.getDefaultInstance(); - case 36: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.getDefaultInstance(); - case 37: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.getDefaultInstance(); - case 38: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.getDefaultInstance(); - case 39: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto.getDefaultInstance(); - case 40: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.getDefaultInstance(); - case 41: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto.getDefaultInstance(); - case 42: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto.getDefaultInstance(); - case 43: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto.getDefaultInstance(); - case 44: - return org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.getDefaultInstance(); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public static Stub newStub( - com.google.protobuf.RpcChannel channel) { - return new Stub(channel); - } - - public static final class Stub extends org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol implements Interface { - private Stub(com.google.protobuf.RpcChannel channel) { - this.channel = channel; - } - - private final com.google.protobuf.RpcChannel channel; - - public com.google.protobuf.RpcChannel getChannel() { - return channel; - } - - public void getBlockLocations( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(0), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.getDefaultInstance())); - } - - public void getServerDefaults( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(1), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.getDefaultInstance())); - } - - public void create( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(2), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.getDefaultInstance())); - } - - public void append( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(3), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.getDefaultInstance())); - } - - public void setReplication( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(4), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.getDefaultInstance())); - } - - public void setPermission( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(5), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.getDefaultInstance())); - } - - public void setOwner( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(6), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.getDefaultInstance())); - } - - public void abandonBlock( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(7), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto.getDefaultInstance())); - } - - public void addBlock( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(8), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto.getDefaultInstance())); - } - - public void getAdditionalDatanode( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(9), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto.getDefaultInstance())); - } - - public void complete( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(10), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto.getDefaultInstance())); - } - - public void reportBadBlocks( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(11), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance())); - } - - public void concat( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(12), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.getDefaultInstance())); - } - - public void rename( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(13), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.getDefaultInstance())); - } - - public void rename2( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(14), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.getDefaultInstance())); - } - - public void delete( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(15), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.class, - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.getDefaultInstance())); - } - - public void mkdirs( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(16), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.getDefaultInstance())); - } - - public void getListing( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(17), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.getDefaultInstance())); - } - - public void renewLease( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(18), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.getDefaultInstance())); - } - - public void recoverLease( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(19), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.getDefaultInstance())); - } - - public void getFsStats( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(20), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.class, - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.getDefaultInstance())); - } - - public void getDatanodeReport( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(21), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.getDefaultInstance())); - } - - public void getPreferredBlockSize( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(22), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.getDefaultInstance())); - } - - public void setSafeMode( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(23), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.getDefaultInstance())); - } - - public void saveNamespace( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(24), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.getDefaultInstance())); - } - - public void restoreFailedStorage( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(25), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.getDefaultInstance())); - } - - public void refreshNodes( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(26), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.getDefaultInstance())); - } - - public void finalizeUpgrade( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(27), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.getDefaultInstance())); - } - - public void distributedUpgradeProgress( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(28), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto.getDefaultInstance())); - } - - public void listCorruptFileBlocks( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(29), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.getDefaultInstance())); - } - - public void metaSave( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(30), - controller, - request, - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.getDefaultInstance())); - } - - public void getFileInfo( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(31), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.getDefaultInstance())); - } - - public void getFileLinkInfo( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(32), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.getDefaultInstance())); - } - - public void getContentSummary( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(33), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.getDefaultInstance())); - } - - public void setQuota( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(34), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.getDefaultInstance())); - } - - public void fsync( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(35), - controller, - request, - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.getDefaultInstance())); - } - - public void setTimes( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(36), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.getDefaultInstance())); - } - - public void createSymlink( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(37), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.getDefaultInstance())); - } - - public void getLinkTarget( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(38), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.getDefaultInstance())); - } - - public void updateBlockForPipeline( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(39), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto.getDefaultInstance())); - } - - public void updatePipeline( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(40), - 
controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.getDefaultInstance())); - } - - public void getDelegationToken( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(41), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto.getDefaultInstance())); - } - - public void renewDelegationToken( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(42), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto.getDefaultInstance())); - } - - public void cancelDelegationToken( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(43), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto.getDefaultInstance())); - } - - public void setBalancerBandwidth( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(44), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.getDefaultInstance())); - } - } - - public static BlockingInterface newBlockingStub( - com.google.protobuf.BlockingRpcChannel channel) { - return new 
BlockingStub(channel); - } - - public interface BlockingInterface { - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto getBlockLocations( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto getServerDefaults( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto create( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto append( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto setReplication( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto setPermission( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto setOwner( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto abandonBlock( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto addBlock( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto getAdditionalDatanode( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto complete( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto request) - throws com.google.protobuf.ServiceException; - - public 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto reportBadBlocks( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto concat( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto rename( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto rename2( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto delete( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto mkdirs( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto getListing( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto renewLease( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto recoverLease( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto getFsStats( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto getDatanodeReport( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto getPreferredBlockSize( - 
com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto setSafeMode( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto saveNamespace( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto restoreFailedStorage( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto refreshNodes( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto finalizeUpgrade( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto distributedUpgradeProgress( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto listCorruptFileBlocks( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto metaSave( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto getFileInfo( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto getFileLinkInfo( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto request) - throws com.google.protobuf.ServiceException; - - public 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto getContentSummary( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto setQuota( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto fsync( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto setTimes( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto createSymlink( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto getLinkTarget( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto updateBlockForPipeline( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto updatePipeline( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto getDelegationToken( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto renewDelegationToken( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto cancelDelegationToken( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto request) - throws com.google.protobuf.ServiceException; - - 
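[Note on the generated client-side API, not part of the patch: the asynchronous Stub above routes every call through channel.callMethod, selecting the wire method by its positional index (0 through 44) in the service descriptor, while the BlockingInterface/BlockingStub pair issues the same forty-five calls synchronously through BlockingRpcChannel.callBlockingMethod. A minimal sketch of how such a blocking stub could be exercised follows. The stand-in channel, the BlockingStubSketch class name, and the assumption that the service is generated as the nested class ClientNamenodeProtocolProtos.ClientNamenodeProtocol are illustrative only; in Hadoop the real RPC engine supplies the channel.]

import com.google.protobuf.BlockingRpcChannel;
import com.google.protobuf.Descriptors;
import com.google.protobuf.Message;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto;

public class BlockingStubSketch {
  public static void main(String[] args) throws ServiceException {
    // Stand-in channel: a real implementation would marshal the request over
    // Hadoop RPC to the NameNode. Here we just echo the method's default
    // response instance, which is enough to show the call path.
    BlockingRpcChannel channel = new BlockingRpcChannel() {
      @Override
      public Message callBlockingMethod(Descriptors.MethodDescriptor method,
          RpcController controller, Message request, Message responsePrototype)
          throws ServiceException {
        return responsePrototype;  // prototype is already the default instance
      }
    };

    // The generated factory wraps the channel in a BlockingStub whose methods
    // dispatch via callBlockingMethod with the method's descriptor index.
    ClientNamenodeProtocol.BlockingInterface nn =
        ClientNamenodeProtocol.newBlockingStub(channel);

    // refreshNodes takes an empty request message, so the default instance
    // is a complete, valid argument; controller may be null for this sketch.
    RefreshNodesResponseProto resp =
        nn.refreshNodes(null, RefreshNodesRequestProto.getDefaultInstance());
    System.out.println("refreshNodes returned: " + resp);
  }
}

[End of sketch; the deleted generated listing resumes below.]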
public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto setBalancerBandwidth( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto request) - throws com.google.protobuf.ServiceException; - } - - private static final class BlockingStub implements BlockingInterface { - private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { - this.channel = channel; - } - - private final com.google.protobuf.BlockingRpcChannel channel; - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto getBlockLocations( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(0), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto getServerDefaults( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(1), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto create( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(2), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto append( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(3), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto setReplication( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto) 
channel.callBlockingMethod( - getDescriptor().getMethods().get(4), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto setPermission( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(5), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto setOwner( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(6), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto abandonBlock( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(7), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto addBlock( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(8), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto getAdditionalDatanode( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(9), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto complete( - 
com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(10), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto reportBadBlocks( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(11), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto concat( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(12), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto rename( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(13), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto rename2( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(14), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto delete( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(15), - controller, - request, - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto mkdirs( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(16), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto getListing( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(17), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto renewLease( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(18), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto recoverLease( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(19), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto getFsStats( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(20), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto getDatanodeReport( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto request) - throws 
com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(21), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto getPreferredBlockSize( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(22), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto setSafeMode( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(23), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto saveNamespace( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(24), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto restoreFailedStorage( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(25), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto refreshNodes( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(26), - controller, - request, - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto finalizeUpgrade( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(27), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto distributedUpgradeProgress( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(28), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto listCorruptFileBlocks( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(29), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto metaSave( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(30), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto getFileInfo( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(31), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto getFileLinkInfo( - 
com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(32), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto getContentSummary( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(33), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto setQuota( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(34), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto fsync( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(35), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto setTimes( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(36), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto createSymlink( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(37), - controller, - request, - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto getLinkTarget( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(38), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto updateBlockForPipeline( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(39), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto updatePipeline( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(40), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto getDelegationToken( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(41), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto renewDelegationToken( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(42), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto.getDefaultInstance()); - } - - - public 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto cancelDelegationToken( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(43), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto setBalancerBandwidth( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(44), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.getDefaultInstance()); - } - - } - } - - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetBlockLocationsRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetBlockLocationsRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetBlockLocationsResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetBlockLocationsResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetServerDefaultsRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetServerDefaultsRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetServerDefaultsResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetServerDefaultsResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_CreateRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_CreateRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_CreateResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_CreateResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_AppendRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_AppendRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_AppendResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_AppendResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_SetReplicationRequestProto_descriptor; - private static - 
com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_SetReplicationRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_SetReplicationResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_SetReplicationResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_SetPermissionRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_SetPermissionRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_SetPermissionResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_SetPermissionResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_SetOwnerRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_SetOwnerRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_SetOwnerResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_SetOwnerResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_AbandonBlockRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_AbandonBlockRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_AbandonBlockResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_AbandonBlockResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_AddBlockRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_AddBlockRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_AddBlockResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_AddBlockResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetAdditionalDatanodeRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetAdditionalDatanodeRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetAdditionalDatanodeResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetAdditionalDatanodeResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_CompleteRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_CompleteRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_CompleteResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_CompleteResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ReportBadBlocksRequestProto_descriptor; - private static 
- com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ReportBadBlocksRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ReportBadBlocksResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ReportBadBlocksResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ConcatRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ConcatRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ConcatResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ConcatResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RenameRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RenameRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RenameResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RenameResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_Rename2RequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_Rename2RequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_Rename2ResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_Rename2ResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_DeleteRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_DeleteRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_DeleteResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_DeleteResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_MkdirsRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_MkdirsRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_MkdirsResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_MkdirsResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetListingRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetListingRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetListingResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetListingResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RenewLeaseRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - 
internal_static_RenewLeaseRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RenewLeaseResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RenewLeaseResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RecoverLeaseRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RecoverLeaseRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RecoverLeaseResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RecoverLeaseResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetFsStatusRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetFsStatusRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetFsStatsResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetFsStatsResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetDatanodeReportRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetDatanodeReportRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetDatanodeReportResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetDatanodeReportResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetPreferredBlockSizeRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetPreferredBlockSizeRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetPreferredBlockSizeResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetPreferredBlockSizeResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_SetSafeModeRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_SetSafeModeRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_SetSafeModeResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_SetSafeModeResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_SaveNamespaceRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_SaveNamespaceRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_SaveNamespaceResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_SaveNamespaceResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RestoreFailedStorageRequestProto_descriptor; - private static - 
com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RestoreFailedStorageRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RestoreFailedStorageResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RestoreFailedStorageResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RefreshNodesRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RefreshNodesRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RefreshNodesResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RefreshNodesResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_FinalizeUpgradeRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_FinalizeUpgradeRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_FinalizeUpgradeResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_FinalizeUpgradeResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_DistributedUpgradeProgressRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_DistributedUpgradeProgressRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_DistributedUpgradeProgressResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_DistributedUpgradeProgressResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ListCorruptFileBlocksRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ListCorruptFileBlocksRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ListCorruptFileBlocksResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ListCorruptFileBlocksResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_MetaSaveRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_MetaSaveRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_MetaSaveResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_MetaSaveResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetFileInfoRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetFileInfoRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetFileInfoResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetFileInfoResponseProto_fieldAccessorTable; - private static 
com.google.protobuf.Descriptors.Descriptor - internal_static_GetFileLinkInfoRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetFileLinkInfoRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetFileLinkInfoResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetFileLinkInfoResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetContentSummaryRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetContentSummaryRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetContentSummaryResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetContentSummaryResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_SetQuotaRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_SetQuotaRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_SetQuotaResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_SetQuotaResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_FsyncRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_FsyncRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_FsyncResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_FsyncResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_SetTimesRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_SetTimesRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_SetTimesResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_SetTimesResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_CreateSymlinkRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_CreateSymlinkRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_CreateSymlinkResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_CreateSymlinkResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetLinkTargetRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetLinkTargetRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetLinkTargetResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetLinkTargetResponseProto_fieldAccessorTable; - private static 
com.google.protobuf.Descriptors.Descriptor - internal_static_UpdateBlockForPipelineRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_UpdateBlockForPipelineRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_UpdateBlockForPipelineResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_UpdateBlockForPipelineResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_UpdatePipelineRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_UpdatePipelineRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_UpdatePipelineResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_UpdatePipelineResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetDelegationTokenRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetDelegationTokenRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetDelegationTokenResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetDelegationTokenResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RenewDelegationTokenRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RenewDelegationTokenRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RenewDelegationTokenResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RenewDelegationTokenResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_CancelDelegationTokenRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_CancelDelegationTokenRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_CancelDelegationTokenResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_CancelDelegationTokenResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_SetBalancerBandwidthRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_SetBalancerBandwidthRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_SetBalancerBandwidthResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_SetBalancerBandwidthResponseProto_fieldAccessorTable; - - public static com.google.protobuf.Descriptors.FileDescriptor - getDescriptor() { - return descriptor; - } - private static com.google.protobuf.Descriptors.FileDescriptor - descriptor; - static { - java.lang.String[] descriptorData = { - "\n\034ClientNamenodeProtocol.proto\032\nhdfs.pro" + - 
"to\"L\n\035GetBlockLocationsRequestProto\022\013\n\003s" + - "rc\030\001 \002(\t\022\016\n\006offset\030\002 \002(\004\022\016\n\006length\030\003 \002(\004" + - "\"H\n\036GetBlockLocationsResponseProto\022&\n\tlo" + - "cations\030\001 \002(\0132\023.LocatedBlocksProto\"\037\n\035Ge" + - "tServerDefaultsRequestProto\"P\n\036GetServer" + - "DefaultsResponseProto\022.\n\016serverDefaults\030" + - "\001 \002(\0132\026.FsServerDefaultsProto\"\253\001\n\022Create" + - "RequestProto\022\013\n\003src\030\001 \002(\t\022\"\n\006masked\030\002 \002(" + - "\0132\022.FsPermissionProto\022\022\n\nclientName\030\003 \002(", - "\t\022\022\n\ncreateFlag\030\004 \002(\r\022\024\n\014createParent\030\005 " + - "\002(\010\022\023\n\013replication\030\006 \002(\r\022\021\n\tblockSize\030\007 " + - "\002(\004\"\025\n\023CreateResponseProto\"5\n\022AppendRequ" + - "estProto\022\013\n\003src\030\001 \002(\t\022\022\n\nclientName\030\002 \002(" + - "\t\"8\n\023AppendResponseProto\022!\n\005block\030\001 \002(\0132" + - "\022.LocatedBlockProto\">\n\032SetReplicationReq" + - "uestProto\022\013\n\003src\030\001 \002(\t\022\023\n\013replication\030\002 " + - "\002(\r\"-\n\033SetReplicationResponseProto\022\016\n\006re" + - "sult\030\001 \002(\010\"P\n\031SetPermissionRequestProto\022" + - "\013\n\003src\030\001 \002(\t\022&\n\npermission\030\002 \002(\0132\022.FsPer", - "missionProto\"\034\n\032SetPermissionResponsePro" + - "to\"H\n\024SetOwnerRequestProto\022\013\n\003src\030\001 \002(\t\022" + - "\020\n\010username\030\002 \002(\t\022\021\n\tgroupname\030\003 \002(\t\"\027\n\025" + - "SetOwnerResponseProto\"W\n\030AbandonBlockReq" + - "uestProto\022\036\n\001b\030\001 \002(\0132\023.ExtendedBlockProt" + - "o\022\013\n\003src\030\002 \002(\t\022\016\n\006holder\030\003 \002(\t\"\033\n\031Abando" + - "nBlockResponseProto\"\210\001\n\024AddBlockRequestP" + - "roto\022\013\n\003src\030\001 \002(\t\022\022\n\nclientName\030\002 \002(\t\022%\n" + - "\010previous\030\003 \002(\0132\023.ExtendedBlockProto\022(\n\014" + - "excludeNodes\030\004 \003(\0132\022.DatanodeInfoProto\":", - "\n\025AddBlockResponseProto\022!\n\005block\030\001 \002(\0132\022" + - ".LocatedBlockProto\"\317\001\n!GetAdditionalData" + - "nodeRequestProto\022\013\n\003src\030\001 \002(\t\022 \n\003blk\030\002 \002" + - "(\0132\023.ExtendedBlockProto\022%\n\texistings\030\003 \003" + - "(\0132\022.DatanodeInfoProto\022$\n\010excludes\030\004 \003(\013" + - "2\022.DatanodeInfoProto\022\032\n\022numAdditionalNod" + - "es\030\005 \002(\r\022\022\n\nclientName\030\006 \002(\t\"G\n\"GetAddit" + - "ionalDatanodeResponseProto\022!\n\005block\030\001 \002(" + - "\0132\022.LocatedBlockProto\"Z\n\024CompleteRequest" + - "Proto\022\013\n\003src\030\001 \002(\t\022\022\n\nclientName\030\002 \002(\t\022!", - "\n\004last\030\003 \002(\0132\023.ExtendedBlockProto\"\'\n\025Com" + - "pleteResponseProto\022\016\n\006result\030\001 \002(\010\"A\n\033Re" + - "portBadBlocksRequestProto\022\"\n\006blocks\030\001 \003(" + - "\0132\022.LocatedBlockProto\"\036\n\034ReportBadBlocks" + - "ResponseProto\"/\n\022ConcatRequestProto\022\013\n\003t" + - "rg\030\001 \002(\t\022\014\n\004srcs\030\002 \003(\t\"\025\n\023ConcatResponse" + - "Proto\".\n\022RenameRequestProto\022\013\n\003src\030\001 \002(\t" + - "\022\013\n\003dst\030\002 \002(\t\"%\n\023RenameResponseProto\022\016\n\006" + - "result\030\001 \002(\010\"F\n\023Rename2RequestProto\022\013\n\003s" + - "rc\030\001 \002(\t\022\013\n\003dst\030\002 \002(\t\022\025\n\roverwriteDest\030\003", - " 
\002(\010\"\026\n\024Rename2ResponseProto\"4\n\022DeleteRe" + - "questProto\022\013\n\003src\030\001 \002(\t\022\021\n\trecursive\030\002 \002" + - "(\010\"%\n\023DeleteResponseProto\022\016\n\006result\030\001 \002(" + - "\010\"[\n\022MkdirsRequestProto\022\013\n\003src\030\001 \002(\t\022\"\n\006" + - "masked\030\002 \002(\0132\022.FsPermissionProto\022\024\n\014crea" + - "teParent\030\003 \002(\010\"%\n\023MkdirsResponseProto\022\016\n" + - "\006result\030\001 \002(\010\"O\n\026GetListingRequestProto\022" + - "\013\n\003src\030\001 \002(\t\022\022\n\nstartAfter\030\002 \002(\014\022\024\n\014need" + - "Location\030\003 \002(\010\"B\n\027GetListingResponseProt" + - "o\022\'\n\007dirList\030\001 \002(\0132\026.DirectoryListingPro", - "to\",\n\026RenewLeaseRequestProto\022\022\n\nclientNa" + - "me\030\001 \002(\t\"\031\n\027RenewLeaseResponseProto\";\n\030R" + - "ecoverLeaseRequestProto\022\013\n\003src\030\001 \002(\t\022\022\n\n" + - "clientName\030\002 \002(\t\"+\n\031RecoverLeaseResponse" + - "Proto\022\016\n\006result\030\001 \002(\010\"\031\n\027GetFsStatusRequ" + - "estProto\"\226\001\n\027GetFsStatsResponseProto\022\020\n\010" + - "capacity\030\001 \002(\004\022\014\n\004used\030\002 \002(\004\022\021\n\tremainin" + - "g\030\003 \002(\004\022\030\n\020under_replicated\030\004 \002(\004\022\026\n\016cor" + - "rupt_blocks\030\005 \002(\004\022\026\n\016missing_blocks\030\006 \002(" + - "\004\"B\n\035GetDatanodeReportRequestProto\022!\n\004ty", - "pe\030\001 \002(\0162\023.DatanodeReportType\"@\n\036GetData" + - "nodeReportResponseProto\022\036\n\002di\030\001 \003(\0132\022.Da" + - "tanodeInfoProto\"5\n!GetPreferredBlockSize" + - "RequestProto\022\020\n\010filename\030\001 \002(\t\"3\n\"GetPre" + - "ferredBlockSizeResponseProto\022\r\n\005bsize\030\001 " + - "\002(\004\":\n\027SetSafeModeRequestProto\022\037\n\006action" + - "\030\001 \002(\0162\017.SafeModeAction\"*\n\030SetSafeModeRe" + - "sponseProto\022\016\n\006result\030\001 \002(\010\"\033\n\031SaveNames" + - "paceRequestProto\"\034\n\032SaveNamespaceRespons" + - "eProto\"/\n RestoreFailedStorageRequestPro", - "to\022\013\n\003arg\030\001 \002(\t\"3\n!RestoreFailedStorageR" + - "esponseProto\022\016\n\006result\030\001 \002(\010\"\032\n\030RefreshN" + - "odesRequestProto\"\033\n\031RefreshNodesResponse" + - "Proto\"\035\n\033FinalizeUpgradeRequestProto\"\036\n\034" + - "FinalizeUpgradeResponseProto\"H\n&Distribu" + - "tedUpgradeProgressRequestProto\022\036\n\006action" + - "\030\001 \002(\0162\016.UpgradeAction\"T\n\'DistributedUpg" + - "radeProgressResponseProto\022)\n\006report\030\001 \002(" + - "\0132\031.UpgradeStatusReportProto\"A\n!ListCorr" + - "uptFileBlocksRequestProto\022\014\n\004path\030\001 \002(\t\022", - "\016\n\006cookie\030\002 \002(\t\"N\n\"ListCorruptFileBlocks" + - "ResponseProto\022(\n\007corrupt\030\001 \002(\0132\027.Corrupt" + - "FileBlocksProto\"(\n\024MetaSaveRequestProto\022" + - "\020\n\010filename\030\001 \002(\t\"\027\n\025MetaSaveResponsePro" + - "to\"&\n\027GetFileInfoRequestProto\022\013\n\003src\030\001 \002" + - "(\t\"<\n\030GetFileInfoResponseProto\022 \n\002fs\030\001 \002" + - "(\0132\024.HdfsFileStatusProto\"*\n\033GetFileLinkI" + - "nfoRequestProto\022\013\n\003src\030\001 \002(\t\"@\n\034GetFileL" + - "inkInfoResponseProto\022 \n\002fs\030\001 \002(\0132\024.HdfsF" + - "ileStatusProto\"-\n\035GetContentSummaryReque", - "stProto\022\014\n\004path\030\001 \002(\t\"G\n\036GetContentSumma" + - "ryResponseProto\022%\n\007summary\030\001 \002(\0132\024.Conte" + - 
"ntSummaryProto\"T\n\024SetQuotaRequestProto\022\014" + - "\n\004path\030\001 \002(\t\022\026\n\016namespaceQuota\030\002 \002(\004\022\026\n\016" + - "diskspaceQuota\030\003 \002(\004\"\027\n\025SetQuotaResponse" + - "Proto\"0\n\021FsyncRequestProto\022\013\n\003src\030\001 \002(\t\022" + - "\016\n\006client\030\002 \002(\t\"\024\n\022FsyncResponseProto\"A\n" + - "\024SetTimesRequestProto\022\013\n\003src\030\001 \002(\t\022\r\n\005mt" + - "ime\030\002 \002(\004\022\r\n\005atime\030\003 \002(\004\"\027\n\025SetTimesResp" + - "onseProto\"t\n\031CreateSymlinkRequestProto\022\016", - "\n\006target\030\001 \002(\t\022\014\n\004link\030\002 \002(\t\022#\n\007dirPerm\030" + - "\003 \002(\0132\022.FsPermissionProto\022\024\n\014createParen" + - "t\030\004 \002(\010\"\034\n\032CreateSymlinkResponseProto\")\n" + - "\031GetLinkTargetRequestProto\022\014\n\004path\030\001 \002(\t" + - "\"0\n\032GetLinkTargetResponseProto\022\022\n\ntarget" + - "Path\030\001 \002(\t\"\\\n\"UpdateBlockForPipelineRequ" + - "estProto\022\"\n\005block\030\001 \002(\0132\023.ExtendedBlockP" + - "roto\022\022\n\nclientName\030\002 \002(\t\"H\n#UpdateBlockF" + - "orPipelineResponseProto\022!\n\005block\030\001 \002(\0132\022" + - ".LocatedBlockProto\"\242\001\n\032UpdatePipelineReq", - "uestProto\022\022\n\nclientName\030\001 \002(\t\022%\n\010oldBloc" + - "k\030\002 \002(\0132\023.ExtendedBlockProto\022%\n\010newBlock" + - "\030\003 \002(\0132\023.ExtendedBlockProto\022\"\n\010newNodes\030" + - "\004 \003(\0132\020.DatanodeIDProto\"\035\n\033UpdatePipelin" + - "eResponseProto\"1\n\036GetDelegationTokenRequ" + - "estProto\022\017\n\007renewer\030\001 \002(\t\"L\n\037GetDelegati" + - "onTokenResponseProto\022)\n\005token\030\001 \002(\0132\032.Bl" + - "ockTokenIdentifierProto\"M\n RenewDelegati" + - "onTokenRequestProto\022)\n\005token\030\001 \002(\0132\032.Blo" + - "ckTokenIdentifierProto\"9\n!RenewDelegatio", - "nTokenResponseProto\022\024\n\014newExireTime\030\001 \002(" + - "\004\"N\n!CancelDelegationTokenRequestProto\022)" + - "\n\005token\030\001 \002(\0132\032.BlockTokenIdentifierProt" + - "o\"$\n\"CancelDelegationTokenResponseProto\"" + - "5\n SetBalancerBandwidthRequestProto\022\021\n\tb" + - "andwidth\030\001 \002(\003\"#\n!SetBalancerBandwidthRe" + - "sponseProto*3\n\nCreateFlag\022\n\n\006CREATE\020\001\022\r\n" + - "\tOVERWRITE\020\002\022\n\n\006APPEND\020\004*1\n\022DatanodeRepo" + - "rtType\022\007\n\003ALL\020\001\022\010\n\004LIVE\020\003\022\010\n\004DEAD\020\003*J\n\016S" + - "afeModeAction\022\022\n\016SAFEMODE_LEAVE\020\001\022\022\n\016SAF", - "EMODE_ENTER\020\002\022\020\n\014SAFEMODE_GET\020\003*G\n\rUpgra" + - "deAction\022\016\n\nGET_STATUS\020\001\022\023\n\017DETAILED_STA" + - "TUS\020\002\022\021\n\rFORCE_PROCEED\020\0032\241\032\n\026ClientNamen" + - "odeProtocol\022T\n\021getBlockLocations\022\036.GetBl" + - "ockLocationsRequestProto\032\037.GetBlockLocat" + - "ionsResponseProto\022T\n\021getServerDefaults\022\036" + - ".GetServerDefaultsRequestProto\032\037.GetServ" + - "erDefaultsResponseProto\0223\n\006create\022\023.Crea" + - "teRequestProto\032\024.CreateResponseProto\0223\n\006" + - "append\022\023.AppendRequestProto\032\024.AppendResp", - "onseProto\022K\n\016setReplication\022\033.SetReplica" + - "tionRequestProto\032\034.SetReplicationRespons" + - "eProto\022H\n\rsetPermission\022\032.SetPermissionR" + - "equestProto\032\033.SetPermissionResponseProto" + - "\0229\n\010setOwner\022\025.SetOwnerRequestProto\032\026.Se" + - "tOwnerResponseProto\022E\n\014abandonBlock\022\031.Ab" + - 
"andonBlockRequestProto\032\032.AbandonBlockRes" + - "ponseProto\0229\n\010addBlock\022\025.AddBlockRequest" + - "Proto\032\026.AddBlockResponseProto\022`\n\025getAddi" + - "tionalDatanode\022\".GetAdditionalDatanodeRe", - "questProto\032#.GetAdditionalDatanodeRespon" + - "seProto\0229\n\010complete\022\025.CompleteRequestPro" + - "to\032\026.CompleteResponseProto\022N\n\017reportBadB" + - "locks\022\034.ReportBadBlocksRequestProto\032\035.Re" + - "portBadBlocksResponseProto\0223\n\006concat\022\023.C" + - "oncatRequestProto\032\024.ConcatResponseProto\022" + - "3\n\006rename\022\023.RenameRequestProto\032\024.RenameR" + - "esponseProto\0226\n\007rename2\022\024.Rename2Request" + - "Proto\032\025.Rename2ResponseProto\0223\n\006delete\022\023" + - ".DeleteRequestProto\032\024.DeleteResponseProt", - "o\0223\n\006mkdirs\022\023.MkdirsRequestProto\032\024.Mkdir" + - "sResponseProto\022?\n\ngetListing\022\027.GetListin" + - "gRequestProto\032\030.GetListingResponseProto\022" + - "?\n\nrenewLease\022\027.RenewLeaseRequestProto\032\030" + - ".RenewLeaseResponseProto\022E\n\014recoverLease" + - "\022\031.RecoverLeaseRequestProto\032\032.RecoverLea" + - "seResponseProto\022@\n\ngetFsStats\022\030.GetFsSta" + - "tusRequestProto\032\030.GetFsStatsResponseProt" + - "o\022T\n\021getDatanodeReport\022\036.GetDatanodeRepo" + - "rtRequestProto\032\037.GetDatanodeReportRespon", - "seProto\022`\n\025getPreferredBlockSize\022\".GetPr" + - "eferredBlockSizeRequestProto\032#.GetPrefer" + - "redBlockSizeResponseProto\022B\n\013setSafeMode" + - "\022\030.SetSafeModeRequestProto\032\031.SetSafeMode" + - "ResponseProto\022H\n\rsaveNamespace\022\032.SaveNam" + - "espaceRequestProto\032\033.SaveNamespaceRespon" + - "seProto\022]\n\024restoreFailedStorage\022!.Restor" + - "eFailedStorageRequestProto\032\".RestoreFail" + - "edStorageResponseProto\022E\n\014refreshNodes\022\031" + - ".RefreshNodesRequestProto\032\032.RefreshNodes", - "ResponseProto\022N\n\017finalizeUpgrade\022\034.Final" + - "izeUpgradeRequestProto\032\035.FinalizeUpgrade" + - "ResponseProto\022o\n\032distributedUpgradeProgr" + - "ess\022\'.DistributedUpgradeProgressRequestP" + - "roto\032(.DistributedUpgradeProgressRespons" + - "eProto\022`\n\025listCorruptFileBlocks\022\".ListCo" + - "rruptFileBlocksRequestProto\032#.ListCorrup" + - "tFileBlocksResponseProto\0229\n\010metaSave\022\025.M" + - "etaSaveRequestProto\032\026.MetaSaveResponsePr" + - "oto\022B\n\013getFileInfo\022\030.GetFileInfoRequestP", - "roto\032\031.GetFileInfoResponseProto\022N\n\017getFi" + - "leLinkInfo\022\034.GetFileLinkInfoRequestProto" + - "\032\035.GetFileLinkInfoResponseProto\022T\n\021getCo" + - "ntentSummary\022\036.GetContentSummaryRequestP" + - "roto\032\037.GetContentSummaryResponseProto\0229\n" + - "\010setQuota\022\025.SetQuotaRequestProto\032\026.SetQu" + - "otaResponseProto\0220\n\005fsync\022\022.FsyncRequest" + - "Proto\032\023.FsyncResponseProto\0229\n\010setTimes\022\025" + - ".SetTimesRequestProto\032\026.SetTimesResponse" + - "Proto\022H\n\rcreateSymlink\022\032.CreateSymlinkRe", - "questProto\032\033.CreateSymlinkResponseProto\022" + - "H\n\rgetLinkTarget\022\032.GetLinkTargetRequestP" + - "roto\032\033.GetLinkTargetResponseProto\022c\n\026upd" + - "ateBlockForPipeline\022#.UpdateBlockForPipe" + - "lineRequestProto\032$.UpdateBlockForPipelin" + - "eResponseProto\022K\n\016updatePipeline\022\033.Updat" + - "ePipelineRequestProto\032\034.UpdatePipelineRe" + - "sponseProto\022W\n\022getDelegationToken\022\037.GetD" + - "elegationTokenRequestProto\032 
.GetDelegati" + - "onTokenResponseProto\022]\n\024renewDelegationT", - "oken\022!.RenewDelegationTokenRequestProto\032" + - "\".RenewDelegationTokenResponseProto\022`\n\025c" + - "ancelDelegationToken\022\".CancelDelegationT" + - "okenRequestProto\032#.CancelDelegationToken" + - "ResponseProto\022]\n\024setBalancerBandwidth\022!." + - "SetBalancerBandwidthRequestProto\032\".SetBa" + - "lancerBandwidthResponseProtoBK\n%org.apac" + - "he.hadoop.hdfs.protocol.protoB\034ClientNam" + - "enodeProtocolProtos\210\001\001\240\001\001" - }; - com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = - new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { - public com.google.protobuf.ExtensionRegistry assignDescriptors( - com.google.protobuf.Descriptors.FileDescriptor root) { - descriptor = root; - internal_static_GetBlockLocationsRequestProto_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_GetBlockLocationsRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetBlockLocationsRequestProto_descriptor, - new java.lang.String[] { "Src", "Offset", "Length", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto.Builder.class); - internal_static_GetBlockLocationsResponseProto_descriptor = - getDescriptor().getMessageTypes().get(1); - internal_static_GetBlockLocationsResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetBlockLocationsResponseProto_descriptor, - new java.lang.String[] { "Locations", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.Builder.class); - internal_static_GetServerDefaultsRequestProto_descriptor = - getDescriptor().getMessageTypes().get(2); - internal_static_GetServerDefaultsRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetServerDefaultsRequestProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto.Builder.class); - internal_static_GetServerDefaultsResponseProto_descriptor = - getDescriptor().getMessageTypes().get(3); - internal_static_GetServerDefaultsResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetServerDefaultsResponseProto_descriptor, - new java.lang.String[] { "ServerDefaults", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto.Builder.class); - internal_static_CreateRequestProto_descriptor = - getDescriptor().getMessageTypes().get(4); - internal_static_CreateRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_CreateRequestProto_descriptor, - new java.lang.String[] { "Src", "Masked", "ClientName", "CreateFlag", "CreateParent", "Replication", "BlockSize", }, - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto.Builder.class); - internal_static_CreateResponseProto_descriptor = - getDescriptor().getMessageTypes().get(5); - internal_static_CreateResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_CreateResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto.Builder.class); - internal_static_AppendRequestProto_descriptor = - getDescriptor().getMessageTypes().get(6); - internal_static_AppendRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_AppendRequestProto_descriptor, - new java.lang.String[] { "Src", "ClientName", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto.Builder.class); - internal_static_AppendResponseProto_descriptor = - getDescriptor().getMessageTypes().get(7); - internal_static_AppendResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_AppendResponseProto_descriptor, - new java.lang.String[] { "Block", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto.Builder.class); - internal_static_SetReplicationRequestProto_descriptor = - getDescriptor().getMessageTypes().get(8); - internal_static_SetReplicationRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_SetReplicationRequestProto_descriptor, - new java.lang.String[] { "Src", "Replication", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto.Builder.class); - internal_static_SetReplicationResponseProto_descriptor = - getDescriptor().getMessageTypes().get(9); - internal_static_SetReplicationResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_SetReplicationResponseProto_descriptor, - new java.lang.String[] { "Result", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationResponseProto.Builder.class); - internal_static_SetPermissionRequestProto_descriptor = - getDescriptor().getMessageTypes().get(10); - internal_static_SetPermissionRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_SetPermissionRequestProto_descriptor, - new java.lang.String[] { "Src", "Permission", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionRequestProto.Builder.class); - internal_static_SetPermissionResponseProto_descriptor = - getDescriptor().getMessageTypes().get(11); - internal_static_SetPermissionResponseProto_fieldAccessorTable = new - 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_SetPermissionResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPermissionResponseProto.Builder.class); - internal_static_SetOwnerRequestProto_descriptor = - getDescriptor().getMessageTypes().get(12); - internal_static_SetOwnerRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_SetOwnerRequestProto_descriptor, - new java.lang.String[] { "Src", "Username", "Groupname", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto.Builder.class); - internal_static_SetOwnerResponseProto_descriptor = - getDescriptor().getMessageTypes().get(13); - internal_static_SetOwnerResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_SetOwnerResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerResponseProto.Builder.class); - internal_static_AbandonBlockRequestProto_descriptor = - getDescriptor().getMessageTypes().get(14); - internal_static_AbandonBlockRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_AbandonBlockRequestProto_descriptor, - new java.lang.String[] { "B", "Src", "Holder", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto.Builder.class); - internal_static_AbandonBlockResponseProto_descriptor = - getDescriptor().getMessageTypes().get(15); - internal_static_AbandonBlockResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_AbandonBlockResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto.Builder.class); - internal_static_AddBlockRequestProto_descriptor = - getDescriptor().getMessageTypes().get(16); - internal_static_AddBlockRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_AddBlockRequestProto_descriptor, - new java.lang.String[] { "Src", "ClientName", "Previous", "ExcludeNodes", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto.Builder.class); - internal_static_AddBlockResponseProto_descriptor = - getDescriptor().getMessageTypes().get(17); - internal_static_AddBlockResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_AddBlockResponseProto_descriptor, - new java.lang.String[] { "Block", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto.Builder.class); - 
internal_static_GetAdditionalDatanodeRequestProto_descriptor = - getDescriptor().getMessageTypes().get(18); - internal_static_GetAdditionalDatanodeRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetAdditionalDatanodeRequestProto_descriptor, - new java.lang.String[] { "Src", "Blk", "Existings", "Excludes", "NumAdditionalNodes", "ClientName", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto.Builder.class); - internal_static_GetAdditionalDatanodeResponseProto_descriptor = - getDescriptor().getMessageTypes().get(19); - internal_static_GetAdditionalDatanodeResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetAdditionalDatanodeResponseProto_descriptor, - new java.lang.String[] { "Block", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto.Builder.class); - internal_static_CompleteRequestProto_descriptor = - getDescriptor().getMessageTypes().get(20); - internal_static_CompleteRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_CompleteRequestProto_descriptor, - new java.lang.String[] { "Src", "ClientName", "Last", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto.Builder.class); - internal_static_CompleteResponseProto_descriptor = - getDescriptor().getMessageTypes().get(21); - internal_static_CompleteResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_CompleteResponseProto_descriptor, - new java.lang.String[] { "Result", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto.Builder.class); - internal_static_ReportBadBlocksRequestProto_descriptor = - getDescriptor().getMessageTypes().get(22); - internal_static_ReportBadBlocksRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ReportBadBlocksRequestProto_descriptor, - new java.lang.String[] { "Blocks", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto.Builder.class); - internal_static_ReportBadBlocksResponseProto_descriptor = - getDescriptor().getMessageTypes().get(23); - internal_static_ReportBadBlocksResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ReportBadBlocksResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksResponseProto.Builder.class); - internal_static_ConcatRequestProto_descriptor = - getDescriptor().getMessageTypes().get(24); - internal_static_ConcatRequestProto_fieldAccessorTable = new - 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ConcatRequestProto_descriptor, - new java.lang.String[] { "Trg", "Srcs", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto.Builder.class); - internal_static_ConcatResponseProto_descriptor = - getDescriptor().getMessageTypes().get(25); - internal_static_ConcatResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ConcatResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatResponseProto.Builder.class); - internal_static_RenameRequestProto_descriptor = - getDescriptor().getMessageTypes().get(26); - internal_static_RenameRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RenameRequestProto_descriptor, - new java.lang.String[] { "Src", "Dst", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto.Builder.class); - internal_static_RenameResponseProto_descriptor = - getDescriptor().getMessageTypes().get(27); - internal_static_RenameResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RenameResponseProto_descriptor, - new java.lang.String[] { "Result", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto.Builder.class); - internal_static_Rename2RequestProto_descriptor = - getDescriptor().getMessageTypes().get(28); - internal_static_Rename2RequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_Rename2RequestProto_descriptor, - new java.lang.String[] { "Src", "Dst", "OverwriteDest", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto.Builder.class); - internal_static_Rename2ResponseProto_descriptor = - getDescriptor().getMessageTypes().get(29); - internal_static_Rename2ResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_Rename2ResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto.Builder.class); - internal_static_DeleteRequestProto_descriptor = - getDescriptor().getMessageTypes().get(30); - internal_static_DeleteRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_DeleteRequestProto_descriptor, - new java.lang.String[] { "Src", "Recursive", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto.Builder.class); - internal_static_DeleteResponseProto_descriptor = - getDescriptor().getMessageTypes().get(31); - 
internal_static_DeleteResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_DeleteResponseProto_descriptor, - new java.lang.String[] { "Result", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto.Builder.class); - internal_static_MkdirsRequestProto_descriptor = - getDescriptor().getMessageTypes().get(32); - internal_static_MkdirsRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_MkdirsRequestProto_descriptor, - new java.lang.String[] { "Src", "Masked", "CreateParent", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto.Builder.class); - internal_static_MkdirsResponseProto_descriptor = - getDescriptor().getMessageTypes().get(33); - internal_static_MkdirsResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_MkdirsResponseProto_descriptor, - new java.lang.String[] { "Result", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto.Builder.class); - internal_static_GetListingRequestProto_descriptor = - getDescriptor().getMessageTypes().get(34); - internal_static_GetListingRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetListingRequestProto_descriptor, - new java.lang.String[] { "Src", "StartAfter", "NeedLocation", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto.Builder.class); - internal_static_GetListingResponseProto_descriptor = - getDescriptor().getMessageTypes().get(35); - internal_static_GetListingResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetListingResponseProto_descriptor, - new java.lang.String[] { "DirList", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto.Builder.class); - internal_static_RenewLeaseRequestProto_descriptor = - getDescriptor().getMessageTypes().get(36); - internal_static_RenewLeaseRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RenewLeaseRequestProto_descriptor, - new java.lang.String[] { "ClientName", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto.Builder.class); - internal_static_RenewLeaseResponseProto_descriptor = - getDescriptor().getMessageTypes().get(37); - internal_static_RenewLeaseResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RenewLeaseResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.class, - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto.Builder.class); - internal_static_RecoverLeaseRequestProto_descriptor = - getDescriptor().getMessageTypes().get(38); - internal_static_RecoverLeaseRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RecoverLeaseRequestProto_descriptor, - new java.lang.String[] { "Src", "ClientName", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto.Builder.class); - internal_static_RecoverLeaseResponseProto_descriptor = - getDescriptor().getMessageTypes().get(39); - internal_static_RecoverLeaseResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RecoverLeaseResponseProto_descriptor, - new java.lang.String[] { "Result", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto.Builder.class); - internal_static_GetFsStatusRequestProto_descriptor = - getDescriptor().getMessageTypes().get(40); - internal_static_GetFsStatusRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetFsStatusRequestProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto.Builder.class); - internal_static_GetFsStatsResponseProto_descriptor = - getDescriptor().getMessageTypes().get(41); - internal_static_GetFsStatsResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetFsStatsResponseProto_descriptor, - new java.lang.String[] { "Capacity", "Used", "Remaining", "UnderReplicated", "CorruptBlocks", "MissingBlocks", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto.Builder.class); - internal_static_GetDatanodeReportRequestProto_descriptor = - getDescriptor().getMessageTypes().get(42); - internal_static_GetDatanodeReportRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetDatanodeReportRequestProto_descriptor, - new java.lang.String[] { "Type", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto.Builder.class); - internal_static_GetDatanodeReportResponseProto_descriptor = - getDescriptor().getMessageTypes().get(43); - internal_static_GetDatanodeReportResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetDatanodeReportResponseProto_descriptor, - new java.lang.String[] { "Di", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto.Builder.class); - internal_static_GetPreferredBlockSizeRequestProto_descriptor = - getDescriptor().getMessageTypes().get(44); - 
internal_static_GetPreferredBlockSizeRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetPreferredBlockSizeRequestProto_descriptor, - new java.lang.String[] { "Filename", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto.Builder.class); - internal_static_GetPreferredBlockSizeResponseProto_descriptor = - getDescriptor().getMessageTypes().get(45); - internal_static_GetPreferredBlockSizeResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetPreferredBlockSizeResponseProto_descriptor, - new java.lang.String[] { "Bsize", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto.Builder.class); - internal_static_SetSafeModeRequestProto_descriptor = - getDescriptor().getMessageTypes().get(46); - internal_static_SetSafeModeRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_SetSafeModeRequestProto_descriptor, - new java.lang.String[] { "Action", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto.Builder.class); - internal_static_SetSafeModeResponseProto_descriptor = - getDescriptor().getMessageTypes().get(47); - internal_static_SetSafeModeResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_SetSafeModeResponseProto_descriptor, - new java.lang.String[] { "Result", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeResponseProto.Builder.class); - internal_static_SaveNamespaceRequestProto_descriptor = - getDescriptor().getMessageTypes().get(48); - internal_static_SaveNamespaceRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_SaveNamespaceRequestProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto.Builder.class); - internal_static_SaveNamespaceResponseProto_descriptor = - getDescriptor().getMessageTypes().get(49); - internal_static_SaveNamespaceResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_SaveNamespaceResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto.Builder.class); - internal_static_RestoreFailedStorageRequestProto_descriptor = - getDescriptor().getMessageTypes().get(50); - internal_static_RestoreFailedStorageRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RestoreFailedStorageRequestProto_descriptor, - new java.lang.String[] { "Arg", }, - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto.Builder.class); - internal_static_RestoreFailedStorageResponseProto_descriptor = - getDescriptor().getMessageTypes().get(51); - internal_static_RestoreFailedStorageResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RestoreFailedStorageResponseProto_descriptor, - new java.lang.String[] { "Result", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto.Builder.class); - internal_static_RefreshNodesRequestProto_descriptor = - getDescriptor().getMessageTypes().get(52); - internal_static_RefreshNodesRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RefreshNodesRequestProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto.Builder.class); - internal_static_RefreshNodesResponseProto_descriptor = - getDescriptor().getMessageTypes().get(53); - internal_static_RefreshNodesResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RefreshNodesResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto.Builder.class); - internal_static_FinalizeUpgradeRequestProto_descriptor = - getDescriptor().getMessageTypes().get(54); - internal_static_FinalizeUpgradeRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_FinalizeUpgradeRequestProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto.Builder.class); - internal_static_FinalizeUpgradeResponseProto_descriptor = - getDescriptor().getMessageTypes().get(55); - internal_static_FinalizeUpgradeResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_FinalizeUpgradeResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto.Builder.class); - internal_static_DistributedUpgradeProgressRequestProto_descriptor = - getDescriptor().getMessageTypes().get(56); - internal_static_DistributedUpgradeProgressRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_DistributedUpgradeProgressRequestProto_descriptor, - new java.lang.String[] { "Action", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto.Builder.class); - 
internal_static_DistributedUpgradeProgressResponseProto_descriptor = - getDescriptor().getMessageTypes().get(57); - internal_static_DistributedUpgradeProgressResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_DistributedUpgradeProgressResponseProto_descriptor, - new java.lang.String[] { "Report", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto.Builder.class); - internal_static_ListCorruptFileBlocksRequestProto_descriptor = - getDescriptor().getMessageTypes().get(58); - internal_static_ListCorruptFileBlocksRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ListCorruptFileBlocksRequestProto_descriptor, - new java.lang.String[] { "Path", "Cookie", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto.Builder.class); - internal_static_ListCorruptFileBlocksResponseProto_descriptor = - getDescriptor().getMessageTypes().get(59); - internal_static_ListCorruptFileBlocksResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ListCorruptFileBlocksResponseProto_descriptor, - new java.lang.String[] { "Corrupt", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto.Builder.class); - internal_static_MetaSaveRequestProto_descriptor = - getDescriptor().getMessageTypes().get(60); - internal_static_MetaSaveRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_MetaSaveRequestProto_descriptor, - new java.lang.String[] { "Filename", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto.Builder.class); - internal_static_MetaSaveResponseProto_descriptor = - getDescriptor().getMessageTypes().get(61); - internal_static_MetaSaveResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_MetaSaveResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto.Builder.class); - internal_static_GetFileInfoRequestProto_descriptor = - getDescriptor().getMessageTypes().get(62); - internal_static_GetFileInfoRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetFileInfoRequestProto_descriptor, - new java.lang.String[] { "Src", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto.Builder.class); - internal_static_GetFileInfoResponseProto_descriptor = - getDescriptor().getMessageTypes().get(63); - internal_static_GetFileInfoResponseProto_fieldAccessorTable = new - 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetFileInfoResponseProto_descriptor, - new java.lang.String[] { "Fs", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto.Builder.class); - internal_static_GetFileLinkInfoRequestProto_descriptor = - getDescriptor().getMessageTypes().get(64); - internal_static_GetFileLinkInfoRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetFileLinkInfoRequestProto_descriptor, - new java.lang.String[] { "Src", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto.Builder.class); - internal_static_GetFileLinkInfoResponseProto_descriptor = - getDescriptor().getMessageTypes().get(65); - internal_static_GetFileLinkInfoResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetFileLinkInfoResponseProto_descriptor, - new java.lang.String[] { "Fs", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto.Builder.class); - internal_static_GetContentSummaryRequestProto_descriptor = - getDescriptor().getMessageTypes().get(66); - internal_static_GetContentSummaryRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetContentSummaryRequestProto_descriptor, - new java.lang.String[] { "Path", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto.Builder.class); - internal_static_GetContentSummaryResponseProto_descriptor = - getDescriptor().getMessageTypes().get(67); - internal_static_GetContentSummaryResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetContentSummaryResponseProto_descriptor, - new java.lang.String[] { "Summary", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto.Builder.class); - internal_static_SetQuotaRequestProto_descriptor = - getDescriptor().getMessageTypes().get(68); - internal_static_SetQuotaRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_SetQuotaRequestProto_descriptor, - new java.lang.String[] { "Path", "NamespaceQuota", "DiskspaceQuota", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto.Builder.class); - internal_static_SetQuotaResponseProto_descriptor = - getDescriptor().getMessageTypes().get(69); - internal_static_SetQuotaResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_SetQuotaResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.class, - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaResponseProto.Builder.class); - internal_static_FsyncRequestProto_descriptor = - getDescriptor().getMessageTypes().get(70); - internal_static_FsyncRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_FsyncRequestProto_descriptor, - new java.lang.String[] { "Src", "Client", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto.Builder.class); - internal_static_FsyncResponseProto_descriptor = - getDescriptor().getMessageTypes().get(71); - internal_static_FsyncResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_FsyncResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncResponseProto.Builder.class); - internal_static_SetTimesRequestProto_descriptor = - getDescriptor().getMessageTypes().get(72); - internal_static_SetTimesRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_SetTimesRequestProto_descriptor, - new java.lang.String[] { "Src", "Mtime", "Atime", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto.Builder.class); - internal_static_SetTimesResponseProto_descriptor = - getDescriptor().getMessageTypes().get(73); - internal_static_SetTimesResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_SetTimesResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto.Builder.class); - internal_static_CreateSymlinkRequestProto_descriptor = - getDescriptor().getMessageTypes().get(74); - internal_static_CreateSymlinkRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_CreateSymlinkRequestProto_descriptor, - new java.lang.String[] { "Target", "Link", "DirPerm", "CreateParent", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto.Builder.class); - internal_static_CreateSymlinkResponseProto_descriptor = - getDescriptor().getMessageTypes().get(75); - internal_static_CreateSymlinkResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_CreateSymlinkResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto.Builder.class); - internal_static_GetLinkTargetRequestProto_descriptor = - getDescriptor().getMessageTypes().get(76); - internal_static_GetLinkTargetRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetLinkTargetRequestProto_descriptor, - new 
java.lang.String[] { "Path", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto.Builder.class); - internal_static_GetLinkTargetResponseProto_descriptor = - getDescriptor().getMessageTypes().get(77); - internal_static_GetLinkTargetResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetLinkTargetResponseProto_descriptor, - new java.lang.String[] { "TargetPath", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto.Builder.class); - internal_static_UpdateBlockForPipelineRequestProto_descriptor = - getDescriptor().getMessageTypes().get(78); - internal_static_UpdateBlockForPipelineRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_UpdateBlockForPipelineRequestProto_descriptor, - new java.lang.String[] { "Block", "ClientName", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto.Builder.class); - internal_static_UpdateBlockForPipelineResponseProto_descriptor = - getDescriptor().getMessageTypes().get(79); - internal_static_UpdateBlockForPipelineResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_UpdateBlockForPipelineResponseProto_descriptor, - new java.lang.String[] { "Block", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto.Builder.class); - internal_static_UpdatePipelineRequestProto_descriptor = - getDescriptor().getMessageTypes().get(80); - internal_static_UpdatePipelineRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_UpdatePipelineRequestProto_descriptor, - new java.lang.String[] { "ClientName", "OldBlock", "NewBlock", "NewNodes", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto.Builder.class); - internal_static_UpdatePipelineResponseProto_descriptor = - getDescriptor().getMessageTypes().get(81); - internal_static_UpdatePipelineResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_UpdatePipelineResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto.Builder.class); - internal_static_GetDelegationTokenRequestProto_descriptor = - getDescriptor().getMessageTypes().get(82); - internal_static_GetDelegationTokenRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetDelegationTokenRequestProto_descriptor, - new java.lang.String[] { "Renewer", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto.class, - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto.Builder.class); - internal_static_GetDelegationTokenResponseProto_descriptor = - getDescriptor().getMessageTypes().get(83); - internal_static_GetDelegationTokenResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetDelegationTokenResponseProto_descriptor, - new java.lang.String[] { "Token", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto.Builder.class); - internal_static_RenewDelegationTokenRequestProto_descriptor = - getDescriptor().getMessageTypes().get(84); - internal_static_RenewDelegationTokenRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RenewDelegationTokenRequestProto_descriptor, - new java.lang.String[] { "Token", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto.Builder.class); - internal_static_RenewDelegationTokenResponseProto_descriptor = - getDescriptor().getMessageTypes().get(85); - internal_static_RenewDelegationTokenResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RenewDelegationTokenResponseProto_descriptor, - new java.lang.String[] { "NewExireTime", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto.Builder.class); - internal_static_CancelDelegationTokenRequestProto_descriptor = - getDescriptor().getMessageTypes().get(86); - internal_static_CancelDelegationTokenRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_CancelDelegationTokenRequestProto_descriptor, - new java.lang.String[] { "Token", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto.Builder.class); - internal_static_CancelDelegationTokenResponseProto_descriptor = - getDescriptor().getMessageTypes().get(87); - internal_static_CancelDelegationTokenResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_CancelDelegationTokenResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto.Builder.class); - internal_static_SetBalancerBandwidthRequestProto_descriptor = - getDescriptor().getMessageTypes().get(88); - internal_static_SetBalancerBandwidthRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_SetBalancerBandwidthRequestProto_descriptor, - new java.lang.String[] { "Bandwidth", }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto.class, - 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto.Builder.class); - internal_static_SetBalancerBandwidthResponseProto_descriptor = - getDescriptor().getMessageTypes().get(89); - internal_static_SetBalancerBandwidthResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_SetBalancerBandwidthResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthResponseProto.Builder.class); - return null; - } - }; - com.google.protobuf.Descriptors.FileDescriptor - .internalBuildGeneratedFileFrom(descriptorData, - new com.google.protobuf.Descriptors.FileDescriptor[] { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(), - }, assigner); - } - - // @@protoc_insertion_point(outer_class_scope) -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/DataTransferProtos.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/DataTransferProtos.java deleted file mode 100644 index c50b40d5bbc..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/DataTransferProtos.java +++ /dev/null @@ -1,10690 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -// Generated by the protocol buffer compiler. DO NOT EDIT! 
-// source: datatransfer.proto - -package org.apache.hadoop.hdfs.protocol.proto; - -public final class DataTransferProtos { - private DataTransferProtos() {} - public static void registerAllExtensions( - com.google.protobuf.ExtensionRegistry registry) { - } - public enum Status - implements com.google.protobuf.ProtocolMessageEnum { - SUCCESS(0, 0), - ERROR(1, 1), - ERROR_CHECKSUM(2, 2), - ERROR_INVALID(3, 3), - ERROR_EXISTS(4, 4), - ERROR_ACCESS_TOKEN(5, 5), - CHECKSUM_OK(6, 6), - ; - - public static final int SUCCESS_VALUE = 0; - public static final int ERROR_VALUE = 1; - public static final int ERROR_CHECKSUM_VALUE = 2; - public static final int ERROR_INVALID_VALUE = 3; - public static final int ERROR_EXISTS_VALUE = 4; - public static final int ERROR_ACCESS_TOKEN_VALUE = 5; - public static final int CHECKSUM_OK_VALUE = 6; - - - public final int getNumber() { return value; } - - public static Status valueOf(int value) { - switch (value) { - case 0: return SUCCESS; - case 1: return ERROR; - case 2: return ERROR_CHECKSUM; - case 3: return ERROR_INVALID; - case 4: return ERROR_EXISTS; - case 5: return ERROR_ACCESS_TOKEN; - case 6: return CHECKSUM_OK; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap<Status> - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap<Status> - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap<Status>() { - public Status findValueByNumber(int number) { - return Status.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.getDescriptor().getEnumTypes().get(0); - } - - private static final Status[] VALUES = { - SUCCESS, ERROR, ERROR_CHECKSUM, ERROR_INVALID, ERROR_EXISTS, ERROR_ACCESS_TOKEN, CHECKSUM_OK, - }; - - public static Status valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private Status(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:Status) - } - - public interface BaseHeaderProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .ExtendedBlockProto block = 1; - boolean hasBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder(); - - // optional .BlockTokenIdentifierProto token = 2; - boolean hasToken(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getToken(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getTokenOrBuilder(); - } - public static final class BaseHeaderProto extends - com.google.protobuf.GeneratedMessage - implements BaseHeaderProtoOrBuilder { - // Use BaseHeaderProto.newBuilder() to construct.
- private BaseHeaderProto(Builder builder) { - super(builder); - } - private BaseHeaderProto(boolean noInit) {} - - private static final BaseHeaderProto defaultInstance; - public static BaseHeaderProto getDefaultInstance() { - return defaultInstance; - } - - public BaseHeaderProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_BaseHeaderProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_BaseHeaderProto_fieldAccessorTable; - } - - private int bitField0_; - // required .ExtendedBlockProto block = 1; - public static final int BLOCK_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() { - return block_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() { - return block_; - } - - // optional .BlockTokenIdentifierProto token = 2; - public static final int TOKEN_FIELD_NUMBER = 2; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto token_; - public boolean hasToken() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getToken() { - return token_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getTokenOrBuilder() { - return token_; - } - - private void initFields() { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBlock()) { - memoizedIsInitialized = 0; - return false; - } - if (!getBlock().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - if (hasToken()) { - if (!getToken().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, block_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, token_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, block_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, token_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - 
protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto) obj; - - boolean result = true; - result = result && (hasBlock() == other.hasBlock()); - if (hasBlock()) { - result = result && getBlock() - .equals(other.getBlock()); - } - result = result && (hasToken() == other.hasToken()); - if (hasToken()) { - result = result && getToken() - .equals(other.getToken()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBlock()) { - hash = (37 * hash) + BLOCK_FIELD_NUMBER; - hash = (53 * hash) + getBlock().hashCode(); - } - if (hasToken()) { - hash = (37 * hash) + TOKEN_FIELD_NUMBER; - hash = (53 * hash) + getToken().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_BaseHeaderProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_BaseHeaderProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBlockFieldBuilder(); - getTokenFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - if (tokenBuilder_ == null) { - token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - } else { - tokenBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto build() { -
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (blockBuilder_ == null) { - result.block_ = block_; - } else { - result.block_ = blockBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - if (tokenBuilder_ == null) { - result.token_ = token_; - } else { - result.token_ = tokenBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) return this; - if (other.hasBlock()) { - mergeBlock(other.getBlock()); - } - if (other.hasToken()) { - mergeToken(other.getToken()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBlock()) { - - return false; - } - if (!getBlock().isInitialized()) { - - return false; - } - if (hasToken()) { - if (!getToken().isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(); - if (hasBlock()) { - subBuilder.mergeFrom(getBlock()); - } - input.readMessage(subBuilder, extensionRegistry); - setBlock(subBuilder.buildPartial()); - break; - } - case 18: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder subBuilder = 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.newBuilder(); - if (hasToken()) { - subBuilder.mergeFrom(getToken()); - } - input.readMessage(subBuilder, extensionRegistry); - setToken(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .ExtendedBlockProto block = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() { - if (blockBuilder_ == null) { - return block_; - } else { - return blockBuilder_.getMessage(); - } - } - public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { - if (blockBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - block_ = value; - onChanged(); - } else { - blockBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setBlock( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) { - if (blockBuilder_ == null) { - block_ = builderForValue.build(); - onChanged(); - } else { - blockBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { - if (blockBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) { - block_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); - } else { - block_ = value; - } - onChanged(); - } else { - blockBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearBlock() { - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - onChanged(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getBlockFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() { - if (blockBuilder_ != null) { - return blockBuilder_.getMessageOrBuilder(); - } else { - return block_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> - getBlockFieldBuilder() { - if (blockBuilder_ == null) { - blockBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>( - block_, - getParentForChildren(), - isClean()); - block_ = null; - } - return blockBuilder_; - } - - // optional .BlockTokenIdentifierProto token = 2; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder> tokenBuilder_; - public boolean hasToken() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getToken() { - if (tokenBuilder_ == null) { - return token_; - } else { - return tokenBuilder_.getMessage(); - } - } - public Builder setToken(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto value) { - if (tokenBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - token_ = value; - onChanged(); - } else { - tokenBuilder_.setMessage(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder setToken( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder builderForValue) { - if (tokenBuilder_ == null) { - token_ = builderForValue.build(); - onChanged(); - } else { - tokenBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder mergeToken(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto value) { - if (tokenBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && - token_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance()) { - token_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.newBuilder(token_).mergeFrom(value).buildPartial(); - } else { - token_ = value; - } - onChanged(); - } else { - tokenBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder clearToken() { - if (tokenBuilder_ == null) { - token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - onChanged(); - } else { - tokenBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder getTokenBuilder() { - bitField0_ |= 0x00000002; - onChanged(); - return getTokenFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getTokenOrBuilder() { - if (tokenBuilder_ != null) { - return tokenBuilder_.getMessageOrBuilder(); - } else { - return token_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder> - getTokenFieldBuilder() { - if (tokenBuilder_ == null) { - tokenBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder>( - token_, - getParentForChildren(), - isClean()); - token_ = null; - } - return tokenBuilder_; - } - - // @@protoc_insertion_point(builder_scope:BaseHeaderProto) - } - - static { - defaultInstance = new BaseHeaderProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:BaseHeaderProto) - } - - public interface ClientOperationHeaderProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .BaseHeaderProto baseHeader = 1; - boolean hasBaseHeader(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getBaseHeader(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getBaseHeaderOrBuilder(); - - // required string clientName = 2; - boolean hasClientName(); - String getClientName(); - } - public static final class ClientOperationHeaderProto extends - com.google.protobuf.GeneratedMessage - implements ClientOperationHeaderProtoOrBuilder { - // Use ClientOperationHeaderProto.newBuilder() to construct. - private ClientOperationHeaderProto(Builder builder) { - super(builder); - } - private ClientOperationHeaderProto(boolean noInit) {} - - private static final ClientOperationHeaderProto defaultInstance; - public static ClientOperationHeaderProto getDefaultInstance() { - return defaultInstance; - } - - public ClientOperationHeaderProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ClientOperationHeaderProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ClientOperationHeaderProto_fieldAccessorTable; - } - - private int bitField0_; - // required .BaseHeaderProto baseHeader = 1; - public static final int BASEHEADER_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto baseHeader_; - public boolean hasBaseHeader() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getBaseHeader() { - return baseHeader_; - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getBaseHeaderOrBuilder() { - return baseHeader_; - } - - // required string clientName = 2; - public static final int CLIENTNAME_FIELD_NUMBER = 2; - private java.lang.Object clientName_; - public boolean hasClientName() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getClientName() { - java.lang.Object ref = clientName_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - clientName_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getClientNameBytes() { - java.lang.Object ref = clientName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - clientName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() 
{ - baseHeader_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance(); - clientName_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBaseHeader()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasClientName()) { - memoizedIsInitialized = 0; - return false; - } - if (!getBaseHeader().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, baseHeader_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getClientNameBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, baseHeader_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getClientNameBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto) obj; - - boolean result = true; - result = result && (hasBaseHeader() == other.hasBaseHeader()); - if (hasBaseHeader()) { - result = result && getBaseHeader() - .equals(other.getBaseHeader()); - } - result = result && (hasClientName() == other.hasClientName()); - if (hasClientName()) { - result = result && getClientName() - .equals(other.getClientName()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBaseHeader()) { - hash = (37 * hash) + BASEHEADER_FIELD_NUMBER; - hash = (53 * hash) + getBaseHeader().hashCode(); - } - if (hasClientName()) { - hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER; - hash = (53 * hash) + getClientName().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ClientOperationHeaderProto_descriptor; - } - - protected
com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ClientOperationHeaderProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBaseHeaderFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (baseHeaderBuilder_ == null) { - baseHeader_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance(); - } else { - baseHeaderBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - clientName_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto build() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (baseHeaderBuilder_ == null) { - result.baseHeader_ = baseHeader_; - } else { - result.baseHeader_ = baseHeaderBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.clientName_ = clientName_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder 
mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance()) return this; - if (other.hasBaseHeader()) { - mergeBaseHeader(other.getBaseHeader()); - } - if (other.hasClientName()) { - setClientName(other.getClientName()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBaseHeader()) { - - return false; - } - if (!hasClientName()) { - - return false; - } - if (!getBaseHeader().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(); - if (hasBaseHeader()) { - subBuilder.mergeFrom(getBaseHeader()); - } - input.readMessage(subBuilder, extensionRegistry); - setBaseHeader(subBuilder.buildPartial()); - break; - } - case 18: { - bitField0_ |= 0x00000002; - clientName_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required .BaseHeaderProto baseHeader = 1; - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto baseHeader_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> baseHeaderBuilder_; - public boolean hasBaseHeader() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getBaseHeader() { - if (baseHeaderBuilder_ == null) { - return baseHeader_; - } else { - return baseHeaderBuilder_.getMessage(); - } - } - public Builder setBaseHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) { - if (baseHeaderBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - baseHeader_ = value; - onChanged(); - } else { - baseHeaderBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setBaseHeader( - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) { - if (baseHeaderBuilder_ == null) { - baseHeader_ = builderForValue.build(); - onChanged(); - } else { - baseHeaderBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeBaseHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) { - if (baseHeaderBuilder_ == null) { - if 
(((bitField0_ & 0x00000001) == 0x00000001) && - baseHeader_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) { - baseHeader_ = - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(baseHeader_).mergeFrom(value).buildPartial(); - } else { - baseHeader_ = value; - } - onChanged(); - } else { - baseHeaderBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearBaseHeader() { - if (baseHeaderBuilder_ == null) { - baseHeader_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance(); - onChanged(); - } else { - baseHeaderBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getBaseHeaderBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getBaseHeaderFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getBaseHeaderOrBuilder() { - if (baseHeaderBuilder_ != null) { - return baseHeaderBuilder_.getMessageOrBuilder(); - } else { - return baseHeader_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> - getBaseHeaderFieldBuilder() { - if (baseHeaderBuilder_ == null) { - baseHeaderBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>( - baseHeader_, - getParentForChildren(), - isClean()); - baseHeader_ = null; - } - return baseHeaderBuilder_; - } - - // required string clientName = 2; - private java.lang.Object clientName_ = ""; - public boolean hasClientName() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getClientName() { - java.lang.Object ref = clientName_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - clientName_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setClientName(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - clientName_ = value; - onChanged(); - return this; - } - public Builder clearClientName() { - bitField0_ = (bitField0_ & ~0x00000002); - clientName_ = getDefaultInstance().getClientName(); - onChanged(); - return this; - } - void setClientName(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000002; - clientName_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:ClientOperationHeaderProto) - } - - static { - defaultInstance = new ClientOperationHeaderProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ClientOperationHeaderProto) - } - - public interface OpReadBlockProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .ClientOperationHeaderProto header = 1; - boolean hasHeader(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder 
getHeaderOrBuilder(); - - // required uint64 offset = 2; - boolean hasOffset(); - long getOffset(); - - // required uint64 len = 3; - boolean hasLen(); - long getLen(); - } - public static final class OpReadBlockProto extends - com.google.protobuf.GeneratedMessage - implements OpReadBlockProtoOrBuilder { - // Use OpReadBlockProto.newBuilder() to construct. - private OpReadBlockProto(Builder builder) { - super(builder); - } - private OpReadBlockProto(boolean noInit) {} - - private static final OpReadBlockProto defaultInstance; - public static OpReadBlockProto getDefaultInstance() { - return defaultInstance; - } - - public OpReadBlockProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpReadBlockProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpReadBlockProto_fieldAccessorTable; - } - - private int bitField0_; - // required .ClientOperationHeaderProto header = 1; - public static final int HEADER_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_; - public boolean hasHeader() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() { - return header_; - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() { - return header_; - } - - // required uint64 offset = 2; - public static final int OFFSET_FIELD_NUMBER = 2; - private long offset_; - public boolean hasOffset() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getOffset() { - return offset_; - } - - // required uint64 len = 3; - public static final int LEN_FIELD_NUMBER = 3; - private long len_; - public boolean hasLen() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getLen() { - return len_; - } - - private void initFields() { - header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance(); - offset_ = 0L; - len_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasHeader()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasOffset()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasLen()) { - memoizedIsInitialized = 0; - return false; - } - if (!getHeader().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, header_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, offset_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt64(3, len_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 
0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, header_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, offset_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(3, len_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto) obj; - - boolean result = true; - result = result && (hasHeader() == other.hasHeader()); - if (hasHeader()) { - result = result && getHeader() - .equals(other.getHeader()); - } - result = result && (hasOffset() == other.hasOffset()); - if (hasOffset()) { - result = result && (getOffset() - == other.getOffset()); - } - result = result && (hasLen() == other.hasLen()); - if (hasLen()) { - result = result && (getLen() - == other.getLen()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasHeader()) { - hash = (37 * hash) + HEADER_FIELD_NUMBER; - hash = (53 * hash) + getHeader().hashCode(); - } - if (hasOffset()) { - hash = (37 * hash) + OFFSET_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getOffset()); - } - if (hasLen()) { - hash = (37 * hash) + LEN_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getLen()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpReadBlockProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpReadBlockProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getHeaderFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (headerBuilder_ == null) { - header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance(); - } else { - headerBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - offset_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - len_ 
= 0L; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto build() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (headerBuilder_ == null) { - result.header_ = header_; - } else { - result.header_ = headerBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.offset_ = offset_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.len_ = len_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.getDefaultInstance()) return this; - if (other.hasHeader()) { - mergeHeader(other.getHeader()); - } - if (other.hasOffset()) { - setOffset(other.getOffset()); - } - if (other.hasLen()) { - setLen(other.getLen()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasHeader()) { - - return false; - } - if (!hasOffset()) { - - return false; - } - if (!hasLen()) { - - return false; - } - if (!getHeader().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { 
- case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder(); - if (hasHeader()) { - subBuilder.mergeFrom(getHeader()); - } - input.readMessage(subBuilder, extensionRegistry); - setHeader(subBuilder.buildPartial()); - break; - } - case 16: { - bitField0_ |= 0x00000002; - offset_ = input.readUInt64(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - len_ = input.readUInt64(); - break; - } - } - } - } - - private int bitField0_; - - // required .ClientOperationHeaderProto header = 1; - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder> headerBuilder_; - public boolean hasHeader() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() { - if (headerBuilder_ == null) { - return header_; - } else { - return headerBuilder_.getMessage(); - } - } - public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) { - if (headerBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - header_ = value; - onChanged(); - } else { - headerBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setHeader( - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder builderForValue) { - if (headerBuilder_ == null) { - header_ = builderForValue.build(); - onChanged(); - } else { - headerBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) { - if (headerBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance()) { - header_ = - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial(); - } else { - header_ = value; - } - onChanged(); - } else { - headerBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearHeader() { - if (headerBuilder_ == null) { - header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance(); - onChanged(); - } else { - headerBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder getHeaderBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - 
return getHeaderFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() { - if (headerBuilder_ != null) { - return headerBuilder_.getMessageOrBuilder(); - } else { - return header_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder> - getHeaderFieldBuilder() { - if (headerBuilder_ == null) { - headerBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>( - header_, - getParentForChildren(), - isClean()); - header_ = null; - } - return headerBuilder_; - } - - // required uint64 offset = 2; - private long offset_ ; - public boolean hasOffset() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getOffset() { - return offset_; - } - public Builder setOffset(long value) { - bitField0_ |= 0x00000002; - offset_ = value; - onChanged(); - return this; - } - public Builder clearOffset() { - bitField0_ = (bitField0_ & ~0x00000002); - offset_ = 0L; - onChanged(); - return this; - } - - // required uint64 len = 3; - private long len_ ; - public boolean hasLen() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getLen() { - return len_; - } - public Builder setLen(long value) { - bitField0_ |= 0x00000004; - len_ = value; - onChanged(); - return this; - } - public Builder clearLen() { - bitField0_ = (bitField0_ & ~0x00000004); - len_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:OpReadBlockProto) - } - - static { - defaultInstance = new OpReadBlockProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:OpReadBlockProto) - } - - public interface ChecksumProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .ChecksumProto.ChecksumType type = 1; - boolean hasType(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType getType(); - - // required uint32 bytesPerChecksum = 2; - boolean hasBytesPerChecksum(); - int getBytesPerChecksum(); - } - public static final class ChecksumProto extends - com.google.protobuf.GeneratedMessage - implements ChecksumProtoOrBuilder { - // Use ChecksumProto.newBuilder() to construct. 
- private ChecksumProto(Builder builder) { - super(builder); - } - private ChecksumProto(boolean noInit) {} - - private static final ChecksumProto defaultInstance; - public static ChecksumProto getDefaultInstance() { - return defaultInstance; - } - - public ChecksumProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ChecksumProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ChecksumProto_fieldAccessorTable; - } - - public enum ChecksumType - implements com.google.protobuf.ProtocolMessageEnum { - NULL(0, 0), - CRC32(1, 1), - CRC32C(2, 2), - ; - - public static final int NULL_VALUE = 0; - public static final int CRC32_VALUE = 1; - public static final int CRC32C_VALUE = 2; - - - public final int getNumber() { return value; } - - public static ChecksumType valueOf(int value) { - switch (value) { - case 0: return NULL; - case 1: return CRC32; - case 2: return CRC32C; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public ChecksumType findValueByNumber(int number) { - return ChecksumType.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDescriptor().getEnumTypes().get(0); - } - - private static final ChecksumType[] VALUES = { - NULL, CRC32, CRC32C, - }; - - public static ChecksumType valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private ChecksumType(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:ChecksumProto.ChecksumType) - } - - private int bitField0_; - // required .ChecksumProto.ChecksumType type = 1; - public static final int TYPE_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType type_; - public boolean hasType() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType getType() { - return type_; - } - - // required uint32 bytesPerChecksum = 2; - public static final int BYTESPERCHECKSUM_FIELD_NUMBER = 2; - private int bytesPerChecksum_; - public boolean hasBytesPerChecksum() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public int getBytesPerChecksum() { - return bytesPerChecksum_; - } - - private void initFields() { - type_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType.NULL; - 
bytesPerChecksum_ = 0; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasType()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasBytesPerChecksum()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(1, type_.getNumber()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt32(2, bytesPerChecksum_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, type_.getNumber()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(2, bytesPerChecksum_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto) obj; - - boolean result = true; - result = result && (hasType() == other.hasType()); - if (hasType()) { - result = result && - (getType() == other.getType()); - } - result = result && (hasBytesPerChecksum() == other.hasBytesPerChecksum()); - if (hasBytesPerChecksum()) { - result = result && (getBytesPerChecksum() - == other.getBytesPerChecksum()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasType()) { - hash = (37 * hash) + TYPE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getType()); - } - if (hasBytesPerChecksum()) { - hash = (37 * hash) + BYTESPERCHECKSUM_FIELD_NUMBER; - hash = (53 * hash) + getBytesPerChecksum(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(byte[] data) - throws 
com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ChecksumProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ChecksumProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent 
parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - type_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType.NULL; - bitField0_ = (bitField0_ & ~0x00000001); - bytesPerChecksum_ = 0; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto build() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.type_ = type_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.bytesPerChecksum_ = bytesPerChecksum_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance()) return this; - if (other.hasType()) { - setType(other.getType()); - } - if (other.hasBytesPerChecksum()) { - setBytesPerChecksum(other.getBytesPerChecksum()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasType()) { - - return false; - } - if (!hasBytesPerChecksum()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - 
com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - int rawValue = input.readEnum(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - bitField0_ |= 0x00000001; - type_ = value; - } - break; - } - case 16: { - bitField0_ |= 0x00000002; - bytesPerChecksum_ = input.readUInt32(); - break; - } - } - } - } - - private int bitField0_; - - // required .ChecksumProto.ChecksumType type = 1; - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType type_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType.NULL; - public boolean hasType() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType getType() { - return type_; - } - public Builder setType(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - type_ = value; - onChanged(); - return this; - } - public Builder clearType() { - bitField0_ = (bitField0_ & ~0x00000001); - type_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType.NULL; - onChanged(); - return this; - } - - // required uint32 bytesPerChecksum = 2; - private int bytesPerChecksum_ ; - public boolean hasBytesPerChecksum() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public int getBytesPerChecksum() { - return bytesPerChecksum_; - } - public Builder setBytesPerChecksum(int value) { - bitField0_ |= 0x00000002; - bytesPerChecksum_ = value; - onChanged(); - return this; - } - public Builder clearBytesPerChecksum() { - bitField0_ = (bitField0_ & ~0x00000002); - bytesPerChecksum_ = 0; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:ChecksumProto) - } - - static { - defaultInstance = new ChecksumProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ChecksumProto) - } - - public interface OpWriteBlockProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .ClientOperationHeaderProto header = 1; - boolean hasHeader(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder(); - - // repeated .DatanodeInfoProto targets = 2; - java.util.List - getTargetsList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index); - int getTargetsCount(); - java.util.List - getTargetsOrBuilderList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder( - int index); - - // optional .DatanodeInfoProto source = 3; - boolean hasSource(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource(); - 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder(); - - // required .OpWriteBlockProto.BlockConstructionStage stage = 4; - boolean hasStage(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage getStage(); - - // required uint32 pipelineSize = 5; - boolean hasPipelineSize(); - int getPipelineSize(); - - // required uint64 minBytesRcvd = 6; - boolean hasMinBytesRcvd(); - long getMinBytesRcvd(); - - // required uint64 maxBytesRcvd = 7; - boolean hasMaxBytesRcvd(); - long getMaxBytesRcvd(); - - // required uint64 latestGenerationStamp = 8; - boolean hasLatestGenerationStamp(); - long getLatestGenerationStamp(); - - // required .ChecksumProto requestedChecksum = 9; - boolean hasRequestedChecksum(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getRequestedChecksum(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getRequestedChecksumOrBuilder(); - } - public static final class OpWriteBlockProto extends - com.google.protobuf.GeneratedMessage - implements OpWriteBlockProtoOrBuilder { - // Use OpWriteBlockProto.newBuilder() to construct. - private OpWriteBlockProto(Builder builder) { - super(builder); - } - private OpWriteBlockProto(boolean noInit) {} - - private static final OpWriteBlockProto defaultInstance; - public static OpWriteBlockProto getDefaultInstance() { - return defaultInstance; - } - - public OpWriteBlockProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpWriteBlockProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpWriteBlockProto_fieldAccessorTable; - } - - public enum BlockConstructionStage - implements com.google.protobuf.ProtocolMessageEnum { - PIPELINE_SETUP_APPEND(0, 0), - PIPELINE_SETUP_APPEND_RECOVERY(1, 1), - DATA_STREAMING(2, 2), - PIPELINE_SETUP_STREAMING_RECOVERY(3, 3), - PIPELINE_CLOSE(4, 4), - PIPELINE_CLOSE_RECOVERY(5, 5), - PIPELINE_SETUP_CREATE(6, 6), - TRANSFER_RBW(7, 7), - TRANSFER_FINALIZED(8, 8), - ; - - public static final int PIPELINE_SETUP_APPEND_VALUE = 0; - public static final int PIPELINE_SETUP_APPEND_RECOVERY_VALUE = 1; - public static final int DATA_STREAMING_VALUE = 2; - public static final int PIPELINE_SETUP_STREAMING_RECOVERY_VALUE = 3; - public static final int PIPELINE_CLOSE_VALUE = 4; - public static final int PIPELINE_CLOSE_RECOVERY_VALUE = 5; - public static final int PIPELINE_SETUP_CREATE_VALUE = 6; - public static final int TRANSFER_RBW_VALUE = 7; - public static final int TRANSFER_FINALIZED_VALUE = 8; - - - public final int getNumber() { return value; } - - public static BlockConstructionStage valueOf(int value) { - switch (value) { - case 0: return PIPELINE_SETUP_APPEND; - case 1: return PIPELINE_SETUP_APPEND_RECOVERY; - case 2: return DATA_STREAMING; - case 3: return PIPELINE_SETUP_STREAMING_RECOVERY; - case 4: return PIPELINE_CLOSE; - case 5: return PIPELINE_CLOSE_RECOVERY; - case 6: return PIPELINE_SETUP_CREATE; - case 7: return TRANSFER_RBW; - case 8: return TRANSFER_FINALIZED; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static 
com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public BlockConstructionStage findValueByNumber(int number) { - return BlockConstructionStage.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDescriptor().getEnumTypes().get(0); - } - - private static final BlockConstructionStage[] VALUES = { - PIPELINE_SETUP_APPEND, PIPELINE_SETUP_APPEND_RECOVERY, DATA_STREAMING, PIPELINE_SETUP_STREAMING_RECOVERY, PIPELINE_CLOSE, PIPELINE_CLOSE_RECOVERY, PIPELINE_SETUP_CREATE, TRANSFER_RBW, TRANSFER_FINALIZED, - }; - - public static BlockConstructionStage valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private BlockConstructionStage(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:OpWriteBlockProto.BlockConstructionStage) - } - - private int bitField0_; - // required .ClientOperationHeaderProto header = 1; - public static final int HEADER_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_; - public boolean hasHeader() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() { - return header_; - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() { - return header_; - } - - // repeated .DatanodeInfoProto targets = 2; - public static final int TARGETS_FIELD_NUMBER = 2; - private java.util.List targets_; - public java.util.List getTargetsList() { - return targets_; - } - public java.util.List - getTargetsOrBuilderList() { - return targets_; - } - public int getTargetsCount() { - return targets_.size(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index) { - return targets_.get(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder( - int index) { - return targets_.get(index); - } - - // optional .DatanodeInfoProto source = 3; - public static final int SOURCE_FIELD_NUMBER = 3; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source_; - public boolean hasSource() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource() { - return source_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder() { - return source_; - } - - // required .OpWriteBlockProto.BlockConstructionStage stage = 4; - public static final int STAGE_FIELD_NUMBER = 4; - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage stage_; - public boolean hasStage() { - return 
((bitField0_ & 0x00000004) == 0x00000004); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage getStage() { - return stage_; - } - - // required uint32 pipelineSize = 5; - public static final int PIPELINESIZE_FIELD_NUMBER = 5; - private int pipelineSize_; - public boolean hasPipelineSize() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public int getPipelineSize() { - return pipelineSize_; - } - - // required uint64 minBytesRcvd = 6; - public static final int MINBYTESRCVD_FIELD_NUMBER = 6; - private long minBytesRcvd_; - public boolean hasMinBytesRcvd() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - public long getMinBytesRcvd() { - return minBytesRcvd_; - } - - // required uint64 maxBytesRcvd = 7; - public static final int MAXBYTESRCVD_FIELD_NUMBER = 7; - private long maxBytesRcvd_; - public boolean hasMaxBytesRcvd() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - public long getMaxBytesRcvd() { - return maxBytesRcvd_; - } - - // required uint64 latestGenerationStamp = 8; - public static final int LATESTGENERATIONSTAMP_FIELD_NUMBER = 8; - private long latestGenerationStamp_; - public boolean hasLatestGenerationStamp() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - public long getLatestGenerationStamp() { - return latestGenerationStamp_; - } - - // required .ChecksumProto requestedChecksum = 9; - public static final int REQUESTEDCHECKSUM_FIELD_NUMBER = 9; - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto requestedChecksum_; - public boolean hasRequestedChecksum() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getRequestedChecksum() { - return requestedChecksum_; - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getRequestedChecksumOrBuilder() { - return requestedChecksum_; - } - - private void initFields() { - header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance(); - targets_ = java.util.Collections.emptyList(); - source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance(); - stage_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND; - pipelineSize_ = 0; - minBytesRcvd_ = 0L; - maxBytesRcvd_ = 0L; - latestGenerationStamp_ = 0L; - requestedChecksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasHeader()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasStage()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasPipelineSize()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasMinBytesRcvd()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasMaxBytesRcvd()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasLatestGenerationStamp()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasRequestedChecksum()) { - memoizedIsInitialized = 0; - return false; - } - if (!getHeader().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getTargetsCount(); i++) { - if (!getTargets(i).isInitialized()) { - memoizedIsInitialized = 
0; - return false; - } - } - if (hasSource()) { - if (!getSource().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - if (!getRequestedChecksum().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, header_); - } - for (int i = 0; i < targets_.size(); i++) { - output.writeMessage(2, targets_.get(i)); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(3, source_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeEnum(4, stage_.getNumber()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeUInt32(5, pipelineSize_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeUInt64(6, minBytesRcvd_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeUInt64(7, maxBytesRcvd_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - output.writeUInt64(8, latestGenerationStamp_); - } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - output.writeMessage(9, requestedChecksum_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, header_); - } - for (int i = 0; i < targets_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, targets_.get(i)); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, source_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(4, stage_.getNumber()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(5, pipelineSize_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(6, minBytesRcvd_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(7, maxBytesRcvd_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(8, latestGenerationStamp_); - } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(9, requestedChecksum_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto) obj; - - boolean result = true; - result = result && (hasHeader() == 
other.hasHeader()); - if (hasHeader()) { - result = result && getHeader() - .equals(other.getHeader()); - } - result = result && getTargetsList() - .equals(other.getTargetsList()); - result = result && (hasSource() == other.hasSource()); - if (hasSource()) { - result = result && getSource() - .equals(other.getSource()); - } - result = result && (hasStage() == other.hasStage()); - if (hasStage()) { - result = result && - (getStage() == other.getStage()); - } - result = result && (hasPipelineSize() == other.hasPipelineSize()); - if (hasPipelineSize()) { - result = result && (getPipelineSize() - == other.getPipelineSize()); - } - result = result && (hasMinBytesRcvd() == other.hasMinBytesRcvd()); - if (hasMinBytesRcvd()) { - result = result && (getMinBytesRcvd() - == other.getMinBytesRcvd()); - } - result = result && (hasMaxBytesRcvd() == other.hasMaxBytesRcvd()); - if (hasMaxBytesRcvd()) { - result = result && (getMaxBytesRcvd() - == other.getMaxBytesRcvd()); - } - result = result && (hasLatestGenerationStamp() == other.hasLatestGenerationStamp()); - if (hasLatestGenerationStamp()) { - result = result && (getLatestGenerationStamp() - == other.getLatestGenerationStamp()); - } - result = result && (hasRequestedChecksum() == other.hasRequestedChecksum()); - if (hasRequestedChecksum()) { - result = result && getRequestedChecksum() - .equals(other.getRequestedChecksum()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasHeader()) { - hash = (37 * hash) + HEADER_FIELD_NUMBER; - hash = (53 * hash) + getHeader().hashCode(); - } - if (getTargetsCount() > 0) { - hash = (37 * hash) + TARGETS_FIELD_NUMBER; - hash = (53 * hash) + getTargetsList().hashCode(); - } - if (hasSource()) { - hash = (37 * hash) + SOURCE_FIELD_NUMBER; - hash = (53 * hash) + getSource().hashCode(); - } - if (hasStage()) { - hash = (37 * hash) + STAGE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getStage()); - } - if (hasPipelineSize()) { - hash = (37 * hash) + PIPELINESIZE_FIELD_NUMBER; - hash = (53 * hash) + getPipelineSize(); - } - if (hasMinBytesRcvd()) { - hash = (37 * hash) + MINBYTESRCVD_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getMinBytesRcvd()); - } - if (hasMaxBytesRcvd()) { - hash = (37 * hash) + MAXBYTESRCVD_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getMaxBytesRcvd()); - } - if (hasLatestGenerationStamp()) { - hash = (37 * hash) + LATESTGENERATIONSTAMP_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getLatestGenerationStamp()); - } - if (hasRequestedChecksum()) { - hash = (37 * hash) + REQUESTEDCHECKSUM_FIELD_NUMBER; - hash = (53 * hash) + getRequestedChecksum().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpWriteBlockProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpWriteBlockProto_fieldAccessorTable; - } - - // Construct using 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getHeaderFieldBuilder(); - getTargetsFieldBuilder(); - getSourceFieldBuilder(); - getRequestedChecksumFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (headerBuilder_ == null) { - header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance(); - } else { - headerBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - if (targetsBuilder_ == null) { - targets_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - } else { - targetsBuilder_.clear(); - } - if (sourceBuilder_ == null) { - source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance(); - } else { - sourceBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000004); - stage_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND; - bitField0_ = (bitField0_ & ~0x00000008); - pipelineSize_ = 0; - bitField0_ = (bitField0_ & ~0x00000010); - minBytesRcvd_ = 0L; - bitField0_ = (bitField0_ & ~0x00000020); - maxBytesRcvd_ = 0L; - bitField0_ = (bitField0_ & ~0x00000040); - latestGenerationStamp_ = 0L; - bitField0_ = (bitField0_ & ~0x00000080); - if (requestedChecksumBuilder_ == null) { - requestedChecksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance(); - } else { - requestedChecksumBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000100); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto build() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - 
to_bitField0_ |= 0x00000001; - } - if (headerBuilder_ == null) { - result.header_ = header_; - } else { - result.header_ = headerBuilder_.build(); - } - if (targetsBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002)) { - targets_ = java.util.Collections.unmodifiableList(targets_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.targets_ = targets_; - } else { - result.targets_ = targetsBuilder_.build(); - } - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000002; - } - if (sourceBuilder_ == null) { - result.source_ = source_; - } else { - result.source_ = sourceBuilder_.build(); - } - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000004; - } - result.stage_ = stage_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000008; - } - result.pipelineSize_ = pipelineSize_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000010; - } - result.minBytesRcvd_ = minBytesRcvd_; - if (((from_bitField0_ & 0x00000040) == 0x00000040)) { - to_bitField0_ |= 0x00000020; - } - result.maxBytesRcvd_ = maxBytesRcvd_; - if (((from_bitField0_ & 0x00000080) == 0x00000080)) { - to_bitField0_ |= 0x00000040; - } - result.latestGenerationStamp_ = latestGenerationStamp_; - if (((from_bitField0_ & 0x00000100) == 0x00000100)) { - to_bitField0_ |= 0x00000080; - } - if (requestedChecksumBuilder_ == null) { - result.requestedChecksum_ = requestedChecksum_; - } else { - result.requestedChecksum_ = requestedChecksumBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDefaultInstance()) return this; - if (other.hasHeader()) { - mergeHeader(other.getHeader()); - } - if (targetsBuilder_ == null) { - if (!other.targets_.isEmpty()) { - if (targets_.isEmpty()) { - targets_ = other.targets_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureTargetsIsMutable(); - targets_.addAll(other.targets_); - } - onChanged(); - } - } else { - if (!other.targets_.isEmpty()) { - if (targetsBuilder_.isEmpty()) { - targetsBuilder_.dispose(); - targetsBuilder_ = null; - targets_ = other.targets_; - bitField0_ = (bitField0_ & ~0x00000002); - targetsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getTargetsFieldBuilder() : null; - } else { - targetsBuilder_.addAllMessages(other.targets_); - } - } - } - if (other.hasSource()) { - mergeSource(other.getSource()); - } - if (other.hasStage()) { - setStage(other.getStage()); - } - if (other.hasPipelineSize()) { - setPipelineSize(other.getPipelineSize()); - } - if (other.hasMinBytesRcvd()) { - setMinBytesRcvd(other.getMinBytesRcvd()); - } - if (other.hasMaxBytesRcvd()) { - setMaxBytesRcvd(other.getMaxBytesRcvd()); - } - if (other.hasLatestGenerationStamp()) { - setLatestGenerationStamp(other.getLatestGenerationStamp()); - } - if (other.hasRequestedChecksum()) { - mergeRequestedChecksum(other.getRequestedChecksum()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasHeader()) { - - return false; - } - if (!hasStage()) { - - return false; - } - if (!hasPipelineSize()) { - - return false; - } - if (!hasMinBytesRcvd()) { - - return false; - } - if (!hasMaxBytesRcvd()) { - - return false; - } - if (!hasLatestGenerationStamp()) { - - return false; - } - if (!hasRequestedChecksum()) { - - return false; - } - if (!getHeader().isInitialized()) { - - return false; - } - for (int i = 0; i < getTargetsCount(); i++) { - if (!getTargets(i).isInitialized()) { - - return false; - } - } - if (hasSource()) { - if (!getSource().isInitialized()) { - - return false; - } - } - if (!getRequestedChecksum().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder(); - if (hasHeader()) { - subBuilder.mergeFrom(getHeader()); - } - input.readMessage(subBuilder, extensionRegistry); - setHeader(subBuilder.buildPartial()); - break; - } - case 18: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addTargets(subBuilder.buildPartial()); - break; - } - case 26: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder(); - if (hasSource()) { - subBuilder.mergeFrom(getSource()); - } - input.readMessage(subBuilder, extensionRegistry); - setSource(subBuilder.buildPartial()); - break; - } - case 32: { - int rawValue = input.readEnum(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(4, rawValue); - } else { - bitField0_ |= 0x00000008; - 
stage_ = value; - } - break; - } - case 40: { - bitField0_ |= 0x00000010; - pipelineSize_ = input.readUInt32(); - break; - } - case 48: { - bitField0_ |= 0x00000020; - minBytesRcvd_ = input.readUInt64(); - break; - } - case 56: { - bitField0_ |= 0x00000040; - maxBytesRcvd_ = input.readUInt64(); - break; - } - case 64: { - bitField0_ |= 0x00000080; - latestGenerationStamp_ = input.readUInt64(); - break; - } - case 74: { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.newBuilder(); - if (hasRequestedChecksum()) { - subBuilder.mergeFrom(getRequestedChecksum()); - } - input.readMessage(subBuilder, extensionRegistry); - setRequestedChecksum(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .ClientOperationHeaderProto header = 1; - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder> headerBuilder_; - public boolean hasHeader() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() { - if (headerBuilder_ == null) { - return header_; - } else { - return headerBuilder_.getMessage(); - } - } - public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) { - if (headerBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - header_ = value; - onChanged(); - } else { - headerBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setHeader( - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder builderForValue) { - if (headerBuilder_ == null) { - header_ = builderForValue.build(); - onChanged(); - } else { - headerBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) { - if (headerBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance()) { - header_ = - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial(); - } else { - header_ = value; - } - onChanged(); - } else { - headerBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearHeader() { - if (headerBuilder_ == null) { - header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance(); - onChanged(); - } else { - headerBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder getHeaderBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return 
getHeaderFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() { - if (headerBuilder_ != null) { - return headerBuilder_.getMessageOrBuilder(); - } else { - return header_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder> - getHeaderFieldBuilder() { - if (headerBuilder_ == null) { - headerBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>( - header_, - getParentForChildren(), - isClean()); - header_ = null; - } - return headerBuilder_; - } - - // repeated .DatanodeInfoProto targets = 2; - private java.util.List targets_ = - java.util.Collections.emptyList(); - private void ensureTargetsIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { - targets_ = new java.util.ArrayList(targets_); - bitField0_ |= 0x00000002; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> targetsBuilder_; - - public java.util.List getTargetsList() { - if (targetsBuilder_ == null) { - return java.util.Collections.unmodifiableList(targets_); - } else { - return targetsBuilder_.getMessageList(); - } - } - public int getTargetsCount() { - if (targetsBuilder_ == null) { - return targets_.size(); - } else { - return targetsBuilder_.getCount(); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index) { - if (targetsBuilder_ == null) { - return targets_.get(index); - } else { - return targetsBuilder_.getMessage(index); - } - } - public Builder setTargets( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (targetsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTargetsIsMutable(); - targets_.set(index, value); - onChanged(); - } else { - targetsBuilder_.setMessage(index, value); - } - return this; - } - public Builder setTargets( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { - if (targetsBuilder_ == null) { - ensureTargetsIsMutable(); - targets_.set(index, builderForValue.build()); - onChanged(); - } else { - targetsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - public Builder addTargets(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (targetsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTargetsIsMutable(); - targets_.add(value); - onChanged(); - } else { - targetsBuilder_.addMessage(value); - } - return this; - } - public Builder addTargets( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (targetsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - 
ensureTargetsIsMutable(); - targets_.add(index, value); - onChanged(); - } else { - targetsBuilder_.addMessage(index, value); - } - return this; - } - public Builder addTargets( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { - if (targetsBuilder_ == null) { - ensureTargetsIsMutable(); - targets_.add(builderForValue.build()); - onChanged(); - } else { - targetsBuilder_.addMessage(builderForValue.build()); - } - return this; - } - public Builder addTargets( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { - if (targetsBuilder_ == null) { - ensureTargetsIsMutable(); - targets_.add(index, builderForValue.build()); - onChanged(); - } else { - targetsBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - public Builder addAllTargets( - java.lang.Iterable values) { - if (targetsBuilder_ == null) { - ensureTargetsIsMutable(); - super.addAll(values, targets_); - onChanged(); - } else { - targetsBuilder_.addAllMessages(values); - } - return this; - } - public Builder clearTargets() { - if (targetsBuilder_ == null) { - targets_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - } else { - targetsBuilder_.clear(); - } - return this; - } - public Builder removeTargets(int index) { - if (targetsBuilder_ == null) { - ensureTargetsIsMutable(); - targets_.remove(index); - onChanged(); - } else { - targetsBuilder_.remove(index); - } - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getTargetsBuilder( - int index) { - return getTargetsFieldBuilder().getBuilder(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder( - int index) { - if (targetsBuilder_ == null) { - return targets_.get(index); } else { - return targetsBuilder_.getMessageOrBuilder(index); - } - } - public java.util.List - getTargetsOrBuilderList() { - if (targetsBuilder_ != null) { - return targetsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(targets_); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addTargetsBuilder() { - return getTargetsFieldBuilder().addBuilder( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addTargetsBuilder( - int index) { - return getTargetsFieldBuilder().addBuilder( - index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); - } - public java.util.List - getTargetsBuilderList() { - return getTargetsFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> - getTargetsFieldBuilder() { - if (targetsBuilder_ == null) { - targetsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>( - targets_, - ((bitField0_ & 0x00000002) == 0x00000002), - getParentForChildren(), - isClean()); - targets_ = null; - } - 
return targetsBuilder_; - } - - // optional .DatanodeInfoProto source = 3; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> sourceBuilder_; - public boolean hasSource() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource() { - if (sourceBuilder_ == null) { - return source_; - } else { - return sourceBuilder_.getMessage(); - } - } - public Builder setSource(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (sourceBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - source_ = value; - onChanged(); - } else { - sourceBuilder_.setMessage(value); - } - bitField0_ |= 0x00000004; - return this; - } - public Builder setSource( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { - if (sourceBuilder_ == null) { - source_ = builderForValue.build(); - onChanged(); - } else { - sourceBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000004; - return this; - } - public Builder mergeSource(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (sourceBuilder_ == null) { - if (((bitField0_ & 0x00000004) == 0x00000004) && - source_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()) { - source_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder(source_).mergeFrom(value).buildPartial(); - } else { - source_ = value; - } - onChanged(); - } else { - sourceBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000004; - return this; - } - public Builder clearSource() { - if (sourceBuilder_ == null) { - source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance(); - onChanged(); - } else { - sourceBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getSourceBuilder() { - bitField0_ |= 0x00000004; - onChanged(); - return getSourceFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder() { - if (sourceBuilder_ != null) { - return sourceBuilder_.getMessageOrBuilder(); - } else { - return source_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> - getSourceFieldBuilder() { - if (sourceBuilder_ == null) { - sourceBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>( - source_, - getParentForChildren(), - isClean()); - source_ = null; - } - return sourceBuilder_; - } - - // required .OpWriteBlockProto.BlockConstructionStage stage = 4; - 
private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage stage_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND; - public boolean hasStage() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage getStage() { - return stage_; - } - public Builder setStage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000008; - stage_ = value; - onChanged(); - return this; - } - public Builder clearStage() { - bitField0_ = (bitField0_ & ~0x00000008); - stage_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND; - onChanged(); - return this; - } - - // required uint32 pipelineSize = 5; - private int pipelineSize_ ; - public boolean hasPipelineSize() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - public int getPipelineSize() { - return pipelineSize_; - } - public Builder setPipelineSize(int value) { - bitField0_ |= 0x00000010; - pipelineSize_ = value; - onChanged(); - return this; - } - public Builder clearPipelineSize() { - bitField0_ = (bitField0_ & ~0x00000010); - pipelineSize_ = 0; - onChanged(); - return this; - } - - // required uint64 minBytesRcvd = 6; - private long minBytesRcvd_ ; - public boolean hasMinBytesRcvd() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - public long getMinBytesRcvd() { - return minBytesRcvd_; - } - public Builder setMinBytesRcvd(long value) { - bitField0_ |= 0x00000020; - minBytesRcvd_ = value; - onChanged(); - return this; - } - public Builder clearMinBytesRcvd() { - bitField0_ = (bitField0_ & ~0x00000020); - minBytesRcvd_ = 0L; - onChanged(); - return this; - } - - // required uint64 maxBytesRcvd = 7; - private long maxBytesRcvd_ ; - public boolean hasMaxBytesRcvd() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - public long getMaxBytesRcvd() { - return maxBytesRcvd_; - } - public Builder setMaxBytesRcvd(long value) { - bitField0_ |= 0x00000040; - maxBytesRcvd_ = value; - onChanged(); - return this; - } - public Builder clearMaxBytesRcvd() { - bitField0_ = (bitField0_ & ~0x00000040); - maxBytesRcvd_ = 0L; - onChanged(); - return this; - } - - // required uint64 latestGenerationStamp = 8; - private long latestGenerationStamp_ ; - public boolean hasLatestGenerationStamp() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - public long getLatestGenerationStamp() { - return latestGenerationStamp_; - } - public Builder setLatestGenerationStamp(long value) { - bitField0_ |= 0x00000080; - latestGenerationStamp_ = value; - onChanged(); - return this; - } - public Builder clearLatestGenerationStamp() { - bitField0_ = (bitField0_ & ~0x00000080); - latestGenerationStamp_ = 0L; - onChanged(); - return this; - } - - // required .ChecksumProto requestedChecksum = 9; - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto requestedChecksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder> requestedChecksumBuilder_; - public boolean hasRequestedChecksum() { - return ((bitField0_ & 0x00000100) == 0x00000100); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getRequestedChecksum() { - if (requestedChecksumBuilder_ == null) { - return requestedChecksum_; - } else { - return requestedChecksumBuilder_.getMessage(); - } - } - public Builder setRequestedChecksum(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto value) { - if (requestedChecksumBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - requestedChecksum_ = value; - onChanged(); - } else { - requestedChecksumBuilder_.setMessage(value); - } - bitField0_ |= 0x00000100; - return this; - } - public Builder setRequestedChecksum( - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder builderForValue) { - if (requestedChecksumBuilder_ == null) { - requestedChecksum_ = builderForValue.build(); - onChanged(); - } else { - requestedChecksumBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000100; - return this; - } - public Builder mergeRequestedChecksum(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto value) { - if (requestedChecksumBuilder_ == null) { - if (((bitField0_ & 0x00000100) == 0x00000100) && - requestedChecksum_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance()) { - requestedChecksum_ = - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.newBuilder(requestedChecksum_).mergeFrom(value).buildPartial(); - } else { - requestedChecksum_ = value; - } - onChanged(); - } else { - requestedChecksumBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000100; - return this; - } - public Builder clearRequestedChecksum() { - if (requestedChecksumBuilder_ == null) { - requestedChecksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance(); - onChanged(); - } else { - requestedChecksumBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000100); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder getRequestedChecksumBuilder() { - bitField0_ |= 0x00000100; - onChanged(); - return getRequestedChecksumFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getRequestedChecksumOrBuilder() { - if (requestedChecksumBuilder_ != null) { - return requestedChecksumBuilder_.getMessageOrBuilder(); - } else { - return requestedChecksum_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder> - getRequestedChecksumFieldBuilder() { - if (requestedChecksumBuilder_ == null) { - requestedChecksumBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder>( - requestedChecksum_, - getParentForChildren(), - isClean()); - requestedChecksum_ = null; - } - return requestedChecksumBuilder_; - } - - // 
@@protoc_insertion_point(builder_scope:OpWriteBlockProto) - } - - static { - defaultInstance = new OpWriteBlockProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:OpWriteBlockProto) - } - - public interface OpTransferBlockProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .ClientOperationHeaderProto header = 1; - boolean hasHeader(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder(); - - // repeated .DatanodeInfoProto targets = 2; - java.util.List - getTargetsList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index); - int getTargetsCount(); - java.util.List - getTargetsOrBuilderList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder( - int index); - } - public static final class OpTransferBlockProto extends - com.google.protobuf.GeneratedMessage - implements OpTransferBlockProtoOrBuilder { - // Use OpTransferBlockProto.newBuilder() to construct. - private OpTransferBlockProto(Builder builder) { - super(builder); - } - private OpTransferBlockProto(boolean noInit) {} - - private static final OpTransferBlockProto defaultInstance; - public static OpTransferBlockProto getDefaultInstance() { - return defaultInstance; - } - - public OpTransferBlockProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpTransferBlockProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpTransferBlockProto_fieldAccessorTable; - } - - private int bitField0_; - // required .ClientOperationHeaderProto header = 1; - public static final int HEADER_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_; - public boolean hasHeader() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() { - return header_; - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() { - return header_; - } - - // repeated .DatanodeInfoProto targets = 2; - public static final int TARGETS_FIELD_NUMBER = 2; - private java.util.List targets_; - public java.util.List getTargetsList() { - return targets_; - } - public java.util.List - getTargetsOrBuilderList() { - return targets_; - } - public int getTargetsCount() { - return targets_.size(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index) { - return targets_.get(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder( - int index) { - return targets_.get(index); - } - - private void initFields() { - header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance(); - targets_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - 
if (isInitialized != -1) return isInitialized == 1; - - if (!hasHeader()) { - memoizedIsInitialized = 0; - return false; - } - if (!getHeader().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getTargetsCount(); i++) { - if (!getTargets(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, header_); - } - for (int i = 0; i < targets_.size(); i++) { - output.writeMessage(2, targets_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, header_); - } - for (int i = 0; i < targets_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, targets_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto) obj; - - boolean result = true; - result = result && (hasHeader() == other.hasHeader()); - if (hasHeader()) { - result = result && getHeader() - .equals(other.getHeader()); - } - result = result && getTargetsList() - .equals(other.getTargetsList()); - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasHeader()) { - hash = (37 * hash) + HEADER_FIELD_NUMBER; - hash = (53 * hash) + getHeader().hashCode(); - } - if (getTargetsCount() > 0) { - hash = (37 * hash) + TARGETS_FIELD_NUMBER; - hash = (53 * hash) + getTargetsList().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - 
public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpTransferBlockProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpTransferBlockProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - 
maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getHeaderFieldBuilder(); - getTargetsFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (headerBuilder_ == null) { - header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance(); - } else { - headerBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - if (targetsBuilder_ == null) { - targets_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - } else { - targetsBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto build() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (headerBuilder_ == null) { - result.header_ = header_; - } else { - result.header_ = headerBuilder_.build(); - } - if (targetsBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002)) { - targets_ = java.util.Collections.unmodifiableList(targets_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.targets_ = targets_; - } else { - result.targets_ = targetsBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.getDefaultInstance()) return this; - if (other.hasHeader()) { - mergeHeader(other.getHeader()); - } - if (targetsBuilder_ 
== null) { - if (!other.targets_.isEmpty()) { - if (targets_.isEmpty()) { - targets_ = other.targets_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureTargetsIsMutable(); - targets_.addAll(other.targets_); - } - onChanged(); - } - } else { - if (!other.targets_.isEmpty()) { - if (targetsBuilder_.isEmpty()) { - targetsBuilder_.dispose(); - targetsBuilder_ = null; - targets_ = other.targets_; - bitField0_ = (bitField0_ & ~0x00000002); - targetsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getTargetsFieldBuilder() : null; - } else { - targetsBuilder_.addAllMessages(other.targets_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasHeader()) { - - return false; - } - if (!getHeader().isInitialized()) { - - return false; - } - for (int i = 0; i < getTargetsCount(); i++) { - if (!getTargets(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder(); - if (hasHeader()) { - subBuilder.mergeFrom(getHeader()); - } - input.readMessage(subBuilder, extensionRegistry); - setHeader(subBuilder.buildPartial()); - break; - } - case 18: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addTargets(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .ClientOperationHeaderProto header = 1; - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder> headerBuilder_; - public boolean hasHeader() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() { - if (headerBuilder_ == null) { - return header_; - } else { - return headerBuilder_.getMessage(); - } - } - public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) { - if (headerBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - header_ = value; - onChanged(); - } else { - 
headerBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setHeader( - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder builderForValue) { - if (headerBuilder_ == null) { - header_ = builderForValue.build(); - onChanged(); - } else { - headerBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) { - if (headerBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance()) { - header_ = - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial(); - } else { - header_ = value; - } - onChanged(); - } else { - headerBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearHeader() { - if (headerBuilder_ == null) { - header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance(); - onChanged(); - } else { - headerBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder getHeaderBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getHeaderFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() { - if (headerBuilder_ != null) { - return headerBuilder_.getMessageOrBuilder(); - } else { - return header_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder> - getHeaderFieldBuilder() { - if (headerBuilder_ == null) { - headerBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>( - header_, - getParentForChildren(), - isClean()); - header_ = null; - } - return headerBuilder_; - } - - // repeated .DatanodeInfoProto targets = 2; - private java.util.List targets_ = - java.util.Collections.emptyList(); - private void ensureTargetsIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { - targets_ = new java.util.ArrayList(targets_); - bitField0_ |= 0x00000002; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> targetsBuilder_; - - public java.util.List getTargetsList() { - if (targetsBuilder_ == null) { - return java.util.Collections.unmodifiableList(targets_); - } else { - return targetsBuilder_.getMessageList(); - } - } - public int getTargetsCount() { - if (targetsBuilder_ == null) { - return targets_.size(); - } else { - return 
targetsBuilder_.getCount(); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index) { - if (targetsBuilder_ == null) { - return targets_.get(index); - } else { - return targetsBuilder_.getMessage(index); - } - } - public Builder setTargets( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (targetsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTargetsIsMutable(); - targets_.set(index, value); - onChanged(); - } else { - targetsBuilder_.setMessage(index, value); - } - return this; - } - public Builder setTargets( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { - if (targetsBuilder_ == null) { - ensureTargetsIsMutable(); - targets_.set(index, builderForValue.build()); - onChanged(); - } else { - targetsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - public Builder addTargets(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (targetsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTargetsIsMutable(); - targets_.add(value); - onChanged(); - } else { - targetsBuilder_.addMessage(value); - } - return this; - } - public Builder addTargets( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (targetsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTargetsIsMutable(); - targets_.add(index, value); - onChanged(); - } else { - targetsBuilder_.addMessage(index, value); - } - return this; - } - public Builder addTargets( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { - if (targetsBuilder_ == null) { - ensureTargetsIsMutable(); - targets_.add(builderForValue.build()); - onChanged(); - } else { - targetsBuilder_.addMessage(builderForValue.build()); - } - return this; - } - public Builder addTargets( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { - if (targetsBuilder_ == null) { - ensureTargetsIsMutable(); - targets_.add(index, builderForValue.build()); - onChanged(); - } else { - targetsBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - public Builder addAllTargets( - java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> values) { - if (targetsBuilder_ == null) { - ensureTargetsIsMutable(); - super.addAll(values, targets_); - onChanged(); - } else { - targetsBuilder_.addAllMessages(values); - } - return this; - } - public Builder clearTargets() { - if (targetsBuilder_ == null) { - targets_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - } else { - targetsBuilder_.clear(); - } - return this; - } - public Builder removeTargets(int index) { - if (targetsBuilder_ == null) { - ensureTargetsIsMutable(); - targets_.remove(index); - onChanged(); - } else { - targetsBuilder_.remove(index); - } - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getTargetsBuilder( - int index) { - return getTargetsFieldBuilder().getBuilder(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder( - int index) { - if (targetsBuilder_ == null) { - return targets_.get(index); } else { - return targetsBuilder_.getMessageOrBuilder(index); - } - } - public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> - getTargetsOrBuilderList() {
if (targetsBuilder_ != null) { - return targetsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(targets_); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addTargetsBuilder() { - return getTargetsFieldBuilder().addBuilder( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addTargetsBuilder( - int index) { - return getTargetsFieldBuilder().addBuilder( - index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); - } - public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder> - getTargetsBuilderList() { - return getTargetsFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> - getTargetsFieldBuilder() { - if (targetsBuilder_ == null) { - targetsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>( - targets_, - ((bitField0_ & 0x00000002) == 0x00000002), - getParentForChildren(), - isClean()); - targets_ = null; - } - return targetsBuilder_; - } - - // @@protoc_insertion_point(builder_scope:OpTransferBlockProto) - } - - static { - defaultInstance = new OpTransferBlockProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:OpTransferBlockProto) - } - - public interface OpReplaceBlockProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .BaseHeaderProto header = 1; - boolean hasHeader(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder(); - - // required string delHint = 2; - boolean hasDelHint(); - String getDelHint(); - - // required .DatanodeInfoProto source = 3; - boolean hasSource(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder(); - } - public static final class OpReplaceBlockProto extends - com.google.protobuf.GeneratedMessage - implements OpReplaceBlockProtoOrBuilder { - // Use OpReplaceBlockProto.newBuilder() to construct.
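// Editor's note (not part of the generated file): the OpReplaceBlockProto class below
// was generated from one of the .proto sources this patch keeps and moves to
// src/main/proto (given the DataTransferProtos outer class, presumably
// datatransfer.proto). As a sketch reconstructed only from the "required ... = N"
// field comments in the interface above, the originating message would read:
//
//   message OpReplaceBlockProto {
//     required BaseHeaderProto header = 1;
//     required string delHint = 2;
//     required DatanodeInfoProto source = 3;
//   }
//
// With the pom.xml change in this patch, protoc regenerates this Java from that
// definition at build time, which is why the checked-in copy is being deleted.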
- private OpReplaceBlockProto(Builder builder) { - super(builder); - } - private OpReplaceBlockProto(boolean noInit) {} - - private static final OpReplaceBlockProto defaultInstance; - public static OpReplaceBlockProto getDefaultInstance() { - return defaultInstance; - } - - public OpReplaceBlockProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpReplaceBlockProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpReplaceBlockProto_fieldAccessorTable; - } - - private int bitField0_; - // required .BaseHeaderProto header = 1; - public static final int HEADER_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_; - public boolean hasHeader() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() { - return header_; - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() { - return header_; - } - - // required string delHint = 2; - public static final int DELHINT_FIELD_NUMBER = 2; - private java.lang.Object delHint_; - public boolean hasDelHint() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getDelHint() { - java.lang.Object ref = delHint_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - delHint_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getDelHintBytes() { - java.lang.Object ref = delHint_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - delHint_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required .DatanodeInfoProto source = 3; - public static final int SOURCE_FIELD_NUMBER = 3; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source_; - public boolean hasSource() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource() { - return source_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder() { - return source_; - } - - private void initFields() { - header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance(); - delHint_ = ""; - source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasHeader()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasDelHint()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasSource()) { - memoizedIsInitialized = 0; - return false; - } - if (!getHeader().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - if (!getSource().isInitialized()) { - memoizedIsInitialized = 0; - 
return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, header_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getDelHintBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeMessage(3, source_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, header_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getDelHintBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, source_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto) obj; - - boolean result = true; - result = result && (hasHeader() == other.hasHeader()); - if (hasHeader()) { - result = result && getHeader() - .equals(other.getHeader()); - } - result = result && (hasDelHint() == other.hasDelHint()); - if (hasDelHint()) { - result = result && getDelHint() - .equals(other.getDelHint()); - } - result = result && (hasSource() == other.hasSource()); - if (hasSource()) { - result = result && getSource() - .equals(other.getSource()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasHeader()) { - hash = (37 * hash) + HEADER_FIELD_NUMBER; - hash = (53 * hash) + getHeader().hashCode(); - } - if (hasDelHint()) { - hash = (37 * hash) + DELHINT_FIELD_NUMBER; - hash = (53 * hash) + getDelHint().hashCode(); - } - if (hasSource()) { - hash = (37 * hash) + SOURCE_FIELD_NUMBER; - hash = (53 * hash) + getSource().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public 
static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpReplaceBlockProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpReplaceBlockProto_fieldAccessorTable; - } - - // Construct using
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getHeaderFieldBuilder(); - getSourceFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (headerBuilder_ == null) { - header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance(); - } else { - headerBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - delHint_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - if (sourceBuilder_ == null) { - source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance(); - } else { - sourceBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto build() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (headerBuilder_ == null) { - result.header_ = header_; - } else { - result.header_ = headerBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.delHint_ = delHint_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - if (sourceBuilder_ == null) { - result.source_ = source_; - } else { - result.source_ = sourceBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto)other); - } else { - super.mergeFrom(other); - 
return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.getDefaultInstance()) return this; - if (other.hasHeader()) { - mergeHeader(other.getHeader()); - } - if (other.hasDelHint()) { - setDelHint(other.getDelHint()); - } - if (other.hasSource()) { - mergeSource(other.getSource()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasHeader()) { - - return false; - } - if (!hasDelHint()) { - - return false; - } - if (!hasSource()) { - - return false; - } - if (!getHeader().isInitialized()) { - - return false; - } - if (!getSource().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(); - if (hasHeader()) { - subBuilder.mergeFrom(getHeader()); - } - input.readMessage(subBuilder, extensionRegistry); - setHeader(subBuilder.buildPartial()); - break; - } - case 18: { - bitField0_ |= 0x00000002; - delHint_ = input.readBytes(); - break; - } - case 26: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder(); - if (hasSource()) { - subBuilder.mergeFrom(getSource()); - } - input.readMessage(subBuilder, extensionRegistry); - setSource(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .BaseHeaderProto header = 1; - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> headerBuilder_; - public boolean hasHeader() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() { - if (headerBuilder_ == null) { - return header_; - } else { - return headerBuilder_.getMessage(); - } - } - public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) { - if (headerBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - header_ = value; - onChanged(); - } else { - headerBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setHeader( - 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) { - if (headerBuilder_ == null) { - header_ = builderForValue.build(); - onChanged(); - } else { - headerBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) { - if (headerBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) { - header_ = - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial(); - } else { - header_ = value; - } - onChanged(); - } else { - headerBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearHeader() { - if (headerBuilder_ == null) { - header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance(); - onChanged(); - } else { - headerBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getHeaderBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getHeaderFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() { - if (headerBuilder_ != null) { - return headerBuilder_.getMessageOrBuilder(); - } else { - return header_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> - getHeaderFieldBuilder() { - if (headerBuilder_ == null) { - headerBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>( - header_, - getParentForChildren(), - isClean()); - header_ = null; - } - return headerBuilder_; - } - - // required string delHint = 2; - private java.lang.Object delHint_ = ""; - public boolean hasDelHint() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getDelHint() { - java.lang.Object ref = delHint_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - delHint_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setDelHint(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - delHint_ = value; - onChanged(); - return this; - } - public Builder clearDelHint() { - bitField0_ = (bitField0_ & ~0x00000002); - delHint_ = getDefaultInstance().getDelHint(); - onChanged(); - return this; - } - void setDelHint(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000002; - delHint_ = value; - onChanged(); - } - - // required .DatanodeInfoProto source = 3; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> sourceBuilder_; - public boolean hasSource() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource() { - if (sourceBuilder_ == null) { - return source_; - } else { - return sourceBuilder_.getMessage(); - } - } - public Builder setSource(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (sourceBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - source_ = value; - onChanged(); - } else { - sourceBuilder_.setMessage(value); - } - bitField0_ |= 0x00000004; - return this; - } - public Builder setSource( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { - if (sourceBuilder_ == null) { - source_ = builderForValue.build(); - onChanged(); - } else { - sourceBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000004; - return this; - } - public Builder mergeSource(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (sourceBuilder_ == null) { - if (((bitField0_ & 0x00000004) == 0x00000004) && - source_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()) { - source_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder(source_).mergeFrom(value).buildPartial(); - } else { - source_ = value; - } - onChanged(); - } else { - sourceBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000004; - return this; - } - public Builder clearSource() { - if (sourceBuilder_ == null) { - source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance(); - onChanged(); - } else { - sourceBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getSourceBuilder() { - bitField0_ |= 0x00000004; - onChanged(); - return getSourceFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder() { - if (sourceBuilder_ != null) { - return sourceBuilder_.getMessageOrBuilder(); - } else { - return source_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> - getSourceFieldBuilder() { - if (sourceBuilder_ == null) { - sourceBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>( - source_, - getParentForChildren(), - isClean()); - source_ = null; - } - return sourceBuilder_; - } - - // @@protoc_insertion_point(builder_scope:OpReplaceBlockProto) - } - - static { - defaultInstance = new OpReplaceBlockProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:OpReplaceBlockProto) - } - - public interface OpCopyBlockProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .BaseHeaderProto header = 
1; - boolean hasHeader(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder(); - } - public static final class OpCopyBlockProto extends - com.google.protobuf.GeneratedMessage - implements OpCopyBlockProtoOrBuilder { - // Use OpCopyBlockProto.newBuilder() to construct. - private OpCopyBlockProto(Builder builder) { - super(builder); - } - private OpCopyBlockProto(boolean noInit) {} - - private static final OpCopyBlockProto defaultInstance; - public static OpCopyBlockProto getDefaultInstance() { - return defaultInstance; - } - - public OpCopyBlockProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpCopyBlockProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpCopyBlockProto_fieldAccessorTable; - } - - private int bitField0_; - // required .BaseHeaderProto header = 1; - public static final int HEADER_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_; - public boolean hasHeader() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() { - return header_; - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() { - return header_; - } - - private void initFields() { - header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasHeader()) { - memoizedIsInitialized = 0; - return false; - } - if (!getHeader().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, header_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, header_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto) obj; - - 
boolean result = true; - result = result && (hasHeader() == other.hasHeader()); - if (hasHeader()) { - result = result && getHeader() - .equals(other.getHeader()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasHeader()) { - hash = (37 * hash) + HEADER_FIELD_NUMBER; - hash = (53 * hash) + getHeader().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder 
newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpCopyBlockProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpCopyBlockProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getHeaderFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (headerBuilder_ == null) { - header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance(); - } else { - headerBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto build() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if
(headerBuilder_ == null) { - result.header_ = header_; - } else { - result.header_ = headerBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.getDefaultInstance()) return this; - if (other.hasHeader()) { - mergeHeader(other.getHeader()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasHeader()) { - - return false; - } - if (!getHeader().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(); - if (hasHeader()) { - subBuilder.mergeFrom(getHeader()); - } - input.readMessage(subBuilder, extensionRegistry); - setHeader(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .BaseHeaderProto header = 1; - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> headerBuilder_; - public boolean hasHeader() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() { - if (headerBuilder_ == null) { - return header_; - } else { - return headerBuilder_.getMessage(); - } - } - public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) { - if (headerBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - header_ = value; - onChanged(); - } else { - headerBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setHeader( - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) { - if (headerBuilder_ == null) { - header_ = builderForValue.build(); - onChanged(); - } else { - headerBuilder_.setMessage(builderForValue.build()); - } - 
bitField0_ |= 0x00000001; - return this; - } - public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) { - if (headerBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) { - header_ = - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial(); - } else { - header_ = value; - } - onChanged(); - } else { - headerBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearHeader() { - if (headerBuilder_ == null) { - header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance(); - onChanged(); - } else { - headerBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getHeaderBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getHeaderFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() { - if (headerBuilder_ != null) { - return headerBuilder_.getMessageOrBuilder(); - } else { - return header_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> - getHeaderFieldBuilder() { - if (headerBuilder_ == null) { - headerBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>( - header_, - getParentForChildren(), - isClean()); - header_ = null; - } - return headerBuilder_; - } - - // @@protoc_insertion_point(builder_scope:OpCopyBlockProto) - } - - static { - defaultInstance = new OpCopyBlockProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:OpCopyBlockProto) - } - - public interface OpBlockChecksumProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .BaseHeaderProto header = 1; - boolean hasHeader(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder(); - } - public static final class OpBlockChecksumProto extends - com.google.protobuf.GeneratedMessage - implements OpBlockChecksumProtoOrBuilder { - // Use OpBlockChecksumProto.newBuilder() to construct. 
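// Editor's note (not part of the generated file): OpBlockChecksumProto below carries
// only the common operation header. From the single field comment in the interface
// above, the originating message in the .proto source is simply the following sketch;
// a corresponding response message presumably carries the checksum itself, but it is
// outside this excerpt:
//
//   message OpBlockChecksumProto {
//     required BaseHeaderProto header = 1;
//   }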
- private OpBlockChecksumProto(Builder builder) { - super(builder); - } - private OpBlockChecksumProto(boolean noInit) {} - - private static final OpBlockChecksumProto defaultInstance; - public static OpBlockChecksumProto getDefaultInstance() { - return defaultInstance; - } - - public OpBlockChecksumProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpBlockChecksumProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpBlockChecksumProto_fieldAccessorTable; - } - - private int bitField0_; - // required .BaseHeaderProto header = 1; - public static final int HEADER_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_; - public boolean hasHeader() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() { - return header_; - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() { - return header_; - } - - private void initFields() { - header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasHeader()) { - memoizedIsInitialized = 0; - return false; - } - if (!getHeader().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, header_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, header_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto) obj; - - boolean result = true; - result = result && (hasHeader() == other.hasHeader()); - if (hasHeader()) { - result = result && getHeader() - .equals(other.getHeader()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + 
getDescriptorForType().hashCode(); - if (hasHeader()) { - hash = (37 * hash) + HEADER_FIELD_NUMBER; - hash = (53 * hash) + getHeader().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return 
newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpBlockChecksumProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpBlockChecksumProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getHeaderFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (headerBuilder_ == null) { - header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance(); - } else { - headerBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto build() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (headerBuilder_ == null) { - result.header_ = header_; - } else { - result.header_ = headerBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if
(other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.getDefaultInstance()) return this; - if (other.hasHeader()) { - mergeHeader(other.getHeader()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasHeader()) { - - return false; - } - if (!getHeader().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(); - if (hasHeader()) { - subBuilder.mergeFrom(getHeader()); - } - input.readMessage(subBuilder, extensionRegistry); - setHeader(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .BaseHeaderProto header = 1; - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> headerBuilder_; - public boolean hasHeader() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() { - if (headerBuilder_ == null) { - return header_; - } else { - return headerBuilder_.getMessage(); - } - } - public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) { - if (headerBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - header_ = value; - onChanged(); - } else { - headerBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setHeader( - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) { - if (headerBuilder_ == null) { - header_ = builderForValue.build(); - onChanged(); - } else { - headerBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) { - if (headerBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - 
header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) { - header_ = - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial(); - } else { - header_ = value; - } - onChanged(); - } else { - headerBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearHeader() { - if (headerBuilder_ == null) { - header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance(); - onChanged(); - } else { - headerBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getHeaderBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getHeaderFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() { - if (headerBuilder_ != null) { - return headerBuilder_.getMessageOrBuilder(); - } else { - return header_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> - getHeaderFieldBuilder() { - if (headerBuilder_ == null) { - headerBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>( - header_, - getParentForChildren(), - isClean()); - header_ = null; - } - return headerBuilder_; - } - - // @@protoc_insertion_point(builder_scope:OpBlockChecksumProto) - } - - static { - defaultInstance = new OpBlockChecksumProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:OpBlockChecksumProto) - } - - public interface PacketHeaderProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required sfixed64 offsetInBlock = 1; - boolean hasOffsetInBlock(); - long getOffsetInBlock(); - - // required sfixed64 seqno = 2; - boolean hasSeqno(); - long getSeqno(); - - // required bool lastPacketInBlock = 3; - boolean hasLastPacketInBlock(); - boolean getLastPacketInBlock(); - - // required sfixed32 dataLen = 4; - boolean hasDataLen(); - int getDataLen(); - } - public static final class PacketHeaderProto extends - com.google.protobuf.GeneratedMessage - implements PacketHeaderProtoOrBuilder { - // Use PacketHeaderProto.newBuilder() to construct. 
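[For reference: per its field comment ("required .BaseHeaderProto header = 1"), the OpBlockChecksumProto class deleted above encodes roughly the following message from datatransfer.proto. This is a sketch reconstructed from the generated code, not the authoritative definition; BaseHeaderProto is defined elsewhere in the same file, and the src/main/proto/datatransfer.proto moved by this patch remains the source of truth.

  // sketch reconstructed from the generated code's field comments
  message OpBlockChecksumProto {
    required BaseHeaderProto header = 1;
  }
]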
- private PacketHeaderProto(Builder builder) { - super(builder); - } - private PacketHeaderProto(boolean noInit) {} - - private static final PacketHeaderProto defaultInstance; - public static PacketHeaderProto getDefaultInstance() { - return defaultInstance; - } - - public PacketHeaderProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_PacketHeaderProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_PacketHeaderProto_fieldAccessorTable; - } - - private int bitField0_; - // required sfixed64 offsetInBlock = 1; - public static final int OFFSETINBLOCK_FIELD_NUMBER = 1; - private long offsetInBlock_; - public boolean hasOffsetInBlock() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getOffsetInBlock() { - return offsetInBlock_; - } - - // required sfixed64 seqno = 2; - public static final int SEQNO_FIELD_NUMBER = 2; - private long seqno_; - public boolean hasSeqno() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getSeqno() { - return seqno_; - } - - // required bool lastPacketInBlock = 3; - public static final int LASTPACKETINBLOCK_FIELD_NUMBER = 3; - private boolean lastPacketInBlock_; - public boolean hasLastPacketInBlock() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public boolean getLastPacketInBlock() { - return lastPacketInBlock_; - } - - // required sfixed32 dataLen = 4; - public static final int DATALEN_FIELD_NUMBER = 4; - private int dataLen_; - public boolean hasDataLen() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public int getDataLen() { - return dataLen_; - } - - private void initFields() { - offsetInBlock_ = 0L; - seqno_ = 0L; - lastPacketInBlock_ = false; - dataLen_ = 0; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasOffsetInBlock()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasSeqno()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasLastPacketInBlock()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasDataLen()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeSFixed64(1, offsetInBlock_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeSFixed64(2, seqno_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBool(3, lastPacketInBlock_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeSFixed32(4, dataLen_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeSFixed64Size(1, offsetInBlock_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeSFixed64Size(2, 
seqno_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(3, lastPacketInBlock_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeSFixed32Size(4, dataLen_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto) obj; - - boolean result = true; - result = result && (hasOffsetInBlock() == other.hasOffsetInBlock()); - if (hasOffsetInBlock()) { - result = result && (getOffsetInBlock() - == other.getOffsetInBlock()); - } - result = result && (hasSeqno() == other.hasSeqno()); - if (hasSeqno()) { - result = result && (getSeqno() - == other.getSeqno()); - } - result = result && (hasLastPacketInBlock() == other.hasLastPacketInBlock()); - if (hasLastPacketInBlock()) { - result = result && (getLastPacketInBlock() - == other.getLastPacketInBlock()); - } - result = result && (hasDataLen() == other.hasDataLen()); - if (hasDataLen()) { - result = result && (getDataLen() - == other.getDataLen()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasOffsetInBlock()) { - hash = (37 * hash) + OFFSETINBLOCK_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getOffsetInBlock()); - } - if (hasSeqno()) { - hash = (37 * hash) + SEQNO_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getSeqno()); - } - if (hasLastPacketInBlock()) { - hash = (37 * hash) + LASTPACKETINBLOCK_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getLastPacketInBlock()); - } - if (hasDataLen()) { - hash = (37 * hash) + DATALEN_FIELD_NUMBER; - hash = (53 * hash) + getDataLen(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_PacketHeaderProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_PacketHeaderProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - 
} - - public Builder clear() { - super.clear(); - offsetInBlock_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - seqno_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - lastPacketInBlock_ = false; - bitField0_ = (bitField0_ & ~0x00000004); - dataLen_ = 0; - bitField0_ = (bitField0_ & ~0x00000008); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto build() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.offsetInBlock_ = offsetInBlock_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.seqno_ = seqno_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.lastPacketInBlock_ = lastPacketInBlock_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.dataLen_ = dataLen_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.getDefaultInstance()) return this; - if (other.hasOffsetInBlock()) { - setOffsetInBlock(other.getOffsetInBlock()); - } - if (other.hasSeqno()) { - setSeqno(other.getSeqno()); - } - if (other.hasLastPacketInBlock()) { - setLastPacketInBlock(other.getLastPacketInBlock()); - } - if (other.hasDataLen()) { - setDataLen(other.getDataLen()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasOffsetInBlock()) { - - return false; - } - if (!hasSeqno()) { - - return false; - } - if 
(!hasLastPacketInBlock()) { - - return false; - } - if (!hasDataLen()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 9: { - bitField0_ |= 0x00000001; - offsetInBlock_ = input.readSFixed64(); - break; - } - case 17: { - bitField0_ |= 0x00000002; - seqno_ = input.readSFixed64(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - lastPacketInBlock_ = input.readBool(); - break; - } - case 37: { - bitField0_ |= 0x00000008; - dataLen_ = input.readSFixed32(); - break; - } - } - } - } - - private int bitField0_; - - // required sfixed64 offsetInBlock = 1; - private long offsetInBlock_ ; - public boolean hasOffsetInBlock() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getOffsetInBlock() { - return offsetInBlock_; - } - public Builder setOffsetInBlock(long value) { - bitField0_ |= 0x00000001; - offsetInBlock_ = value; - onChanged(); - return this; - } - public Builder clearOffsetInBlock() { - bitField0_ = (bitField0_ & ~0x00000001); - offsetInBlock_ = 0L; - onChanged(); - return this; - } - - // required sfixed64 seqno = 2; - private long seqno_ ; - public boolean hasSeqno() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getSeqno() { - return seqno_; - } - public Builder setSeqno(long value) { - bitField0_ |= 0x00000002; - seqno_ = value; - onChanged(); - return this; - } - public Builder clearSeqno() { - bitField0_ = (bitField0_ & ~0x00000002); - seqno_ = 0L; - onChanged(); - return this; - } - - // required bool lastPacketInBlock = 3; - private boolean lastPacketInBlock_ ; - public boolean hasLastPacketInBlock() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public boolean getLastPacketInBlock() { - return lastPacketInBlock_; - } - public Builder setLastPacketInBlock(boolean value) { - bitField0_ |= 0x00000004; - lastPacketInBlock_ = value; - onChanged(); - return this; - } - public Builder clearLastPacketInBlock() { - bitField0_ = (bitField0_ & ~0x00000004); - lastPacketInBlock_ = false; - onChanged(); - return this; - } - - // required sfixed32 dataLen = 4; - private int dataLen_ ; - public boolean hasDataLen() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public int getDataLen() { - return dataLen_; - } - public Builder setDataLen(int value) { - bitField0_ |= 0x00000008; - dataLen_ = value; - onChanged(); - return this; - } - public Builder clearDataLen() { - bitField0_ = (bitField0_ & ~0x00000008); - dataLen_ = 0; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:PacketHeaderProto) - } - - static { - defaultInstance = new PacketHeaderProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:PacketHeaderProto) - } - - public interface PipelineAckProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required sint64 seqno = 1; - boolean hasSeqno(); - long getSeqno(); - - // repeated .Status status 
= 2; - java.util.List<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> getStatusList(); - int getStatusCount(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus(int index); - } - public static final class PipelineAckProto extends - com.google.protobuf.GeneratedMessage - implements PipelineAckProtoOrBuilder { - // Use PipelineAckProto.newBuilder() to construct. - private PipelineAckProto(Builder builder) { - super(builder); - } - private PipelineAckProto(boolean noInit) {} - - private static final PipelineAckProto defaultInstance; - public static PipelineAckProto getDefaultInstance() { - return defaultInstance; - } - - public PipelineAckProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_PipelineAckProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_PipelineAckProto_fieldAccessorTable; - } - - private int bitField0_; - // required sint64 seqno = 1; - public static final int SEQNO_FIELD_NUMBER = 1; - private long seqno_; - public boolean hasSeqno() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getSeqno() { - return seqno_; - } - - // repeated .Status status = 2; - public static final int STATUS_FIELD_NUMBER = 2; - private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> status_; - public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> getStatusList() { - return status_; - } - public int getStatusCount() { - return status_.size(); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus(int index) { - return status_.get(index); - } - - private void initFields() { - seqno_ = 0L; - status_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasSeqno()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeSInt64(1, seqno_); - } - for (int i = 0; i < status_.size(); i++) { - output.writeEnum(2, status_.get(i).getNumber()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeSInt64Size(1, seqno_); - } - { - int dataSize = 0; - for (int i = 0; i < status_.size(); i++) { - dataSize += com.google.protobuf.CodedOutputStream - .computeEnumSizeNoTag(status_.get(i).getNumber()); - } - size += dataSize; - size += 1 * status_.size(); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto) obj; - - boolean result = true; - result = result && (hasSeqno() == other.hasSeqno()); - if (hasSeqno()) { - result = result && (getSeqno() - == other.getSeqno()); - } - result = result && getStatusList() - .equals(other.getStatusList()); - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasSeqno()) { - hash = (37 * hash) + SEQNO_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getSeqno()); - } - if (getStatusCount() > 0) { - hash = (37 * hash) + STATUS_FIELD_NUMBER; - hash = (53 * hash) + hashEnumList(getStatusList()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom( - 
com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_PipelineAckProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_PipelineAckProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - seqno_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - status_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto build() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.seqno_ = seqno_; - if (((bitField0_ & 0x00000002) == 0x00000002)) { - status_ = java.util.Collections.unmodifiableList(status_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.status_ = status_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.getDefaultInstance()) return this; - if (other.hasSeqno()) { - setSeqno(other.getSeqno()); - } - if (!other.status_.isEmpty()) { - if (status_.isEmpty()) { - status_ = other.status_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureStatusIsMutable(); - status_.addAll(other.status_); - } - onChanged(); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasSeqno()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - seqno_ = input.readSInt64(); - break; - } - case 16: { - int rawValue = input.readEnum(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(2, rawValue); - } else { - addStatus(value); - } - break; - } - case 18: { - int length = input.readRawVarint32(); - int oldLimit = input.pushLimit(length); - while(input.getBytesUntilLimit() > 0) { - int rawValue = input.readEnum(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(2, rawValue); - } else { - addStatus(value); - } - } - input.popLimit(oldLimit); - break; - } - } - } - } - - private int bitField0_; - - // required sint64 seqno = 1; - private long seqno_ ; - public boolean hasSeqno() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getSeqno() { - return seqno_; - } - public Builder setSeqno(long value) { - bitField0_ |= 0x00000001; 
- seqno_ = value; - onChanged(); - return this; - } - public Builder clearSeqno() { - bitField0_ = (bitField0_ & ~0x00000001); - seqno_ = 0L; - onChanged(); - return this; - } - - // repeated .Status status = 2; - private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> status_ = - java.util.Collections.emptyList(); - private void ensureStatusIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { - status_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status>(status_); - bitField0_ |= 0x00000002; - } - } - public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> getStatusList() { - return java.util.Collections.unmodifiableList(status_); - } - public int getStatusCount() { - return status_.size(); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus(int index) { - return status_.get(index); - } - public Builder setStatus( - int index, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) { - if (value == null) { - throw new NullPointerException(); - } - ensureStatusIsMutable(); - status_.set(index, value); - onChanged(); - return this; - } - public Builder addStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) { - if (value == null) { - throw new NullPointerException(); - } - ensureStatusIsMutable(); - status_.add(value); - onChanged(); - return this; - } - public Builder addAllStatus( - java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> values) { - ensureStatusIsMutable(); - super.addAll(values, status_); - onChanged(); - return this; - } - public Builder clearStatus() { - status_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:PipelineAckProto) - } - - static { - defaultInstance = new PipelineAckProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:PipelineAckProto) - } - - public interface ReadOpChecksumInfoProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .ChecksumProto checksum = 1; - boolean hasChecksum(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getChecksum(); - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getChecksumOrBuilder(); - - // required uint64 chunkOffset = 2; - boolean hasChunkOffset(); - long getChunkOffset(); - } - public static final class ReadOpChecksumInfoProto extends - com.google.protobuf.GeneratedMessage - implements ReadOpChecksumInfoProtoOrBuilder { - // Use ReadOpChecksumInfoProto.newBuilder() to construct.
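[For reference: the two generated classes deleted just above, PacketHeaderProto and PipelineAckProto, encode roughly the following datatransfer.proto messages, reconstructed from the field comments in the generated code. The Status enum is defined in the same file (it surfaces here as DataTransferProtos.Status); the moved src/main/proto/datatransfer.proto remains authoritative.

  // sketch reconstructed from the generated code's field comments
  message PacketHeaderProto {
    required sfixed64 offsetInBlock = 1;
    required sfixed64 seqno = 2;
    required bool lastPacketInBlock = 3;
    required sfixed32 dataLen = 4;
  }

  message PipelineAckProto {
    required sint64 seqno = 1;
    repeated Status status = 2;
  }
]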
- private ReadOpChecksumInfoProto(Builder builder) { - super(builder); - } - private ReadOpChecksumInfoProto(boolean noInit) {} - - private static final ReadOpChecksumInfoProto defaultInstance; - public static ReadOpChecksumInfoProto getDefaultInstance() { - return defaultInstance; - } - - public ReadOpChecksumInfoProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ReadOpChecksumInfoProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ReadOpChecksumInfoProto_fieldAccessorTable; - } - - private int bitField0_; - // required .ChecksumProto checksum = 1; - public static final int CHECKSUM_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto checksum_; - public boolean hasChecksum() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getChecksum() { - return checksum_; - } - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getChecksumOrBuilder() { - return checksum_; - } - - // required uint64 chunkOffset = 2; - public static final int CHUNKOFFSET_FIELD_NUMBER = 2; - private long chunkOffset_; - public boolean hasChunkOffset() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getChunkOffset() { - return chunkOffset_; - } - - private void initFields() { - checksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance(); - chunkOffset_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasChecksum()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasChunkOffset()) { - memoizedIsInitialized = 0; - return false; - } - if (!getChecksum().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, checksum_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, chunkOffset_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, checksum_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, chunkOffset_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto) obj; - - boolean result = true; - result = result && (hasChecksum() == other.hasChecksum()); - if (hasChecksum()) { - result = result && getChecksum() - .equals(other.getChecksum()); - } - result = result && (hasChunkOffset() == other.hasChunkOffset()); - if (hasChunkOffset()) { - result = result && (getChunkOffset() - == other.getChunkOffset()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasChecksum()) { - hash = (37 * hash) + CHECKSUM_FIELD_NUMBER; - hash = (53 * hash) + getChecksum().hashCode(); - } - if (hasChunkOffset()) { - hash = (37 * hash) + CHUNKOFFSET_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getChunkOffset()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, 
extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ReadOpChecksumInfoProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ReadOpChecksumInfoProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getChecksumFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (checksumBuilder_ == null) { - checksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance(); - } else { - checksumBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - chunkOffset_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto build() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (checksumBuilder_ == null) { - result.checksum_ = checksum_; - } else { - result.checksum_ = checksumBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.chunkOffset_ = chunkOffset_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance()) return this; - if (other.hasChecksum()) { - mergeChecksum(other.getChecksum()); - } - if (other.hasChunkOffset()) { - setChunkOffset(other.getChunkOffset()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasChecksum()) { - - return false; - } - if (!hasChunkOffset()) { - - return false; - } - if (!getChecksum().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.newBuilder(); - if (hasChecksum()) { - subBuilder.mergeFrom(getChecksum()); - } - input.readMessage(subBuilder, extensionRegistry); - setChecksum(subBuilder.buildPartial()); - break; - } - case 16: { - bitField0_ |= 0x00000002; - chunkOffset_ = input.readUInt64(); - break; - } - } - } - } - - private int bitField0_; - - // required .ChecksumProto checksum = 1; - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto checksum_ = 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
-      private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder> checksumBuilder_;

[... generated has/get/set/merge/clear accessors and SingleFieldBuilder plumbing for the checksum field elided; mechanical protoc output with no hand-written logic ...]

-
-      // required uint64 chunkOffset = 2;
-      private long chunkOffset_ ;
-      public boolean hasChunkOffset() {
-        return ((bitField0_ & 0x00000002) == 0x00000002);
-      }
-      public long getChunkOffset() {
-        return chunkOffset_;
-      }
-      public Builder setChunkOffset(long value) {
-        bitField0_ |= 0x00000002;
-        chunkOffset_ = value;
-        onChanged();
-        return this;
-      }
-      public Builder clearChunkOffset() {
-        bitField0_ = (bitField0_ & ~0x00000002);
-        chunkOffset_ = 0L;
-        onChanged();
-        return this;
-      }
-
-      // @@protoc_insertion_point(builder_scope:ReadOpChecksumInfoProto)
-    }
-
-    static {
-      defaultInstance = new ReadOpChecksumInfoProto(true);
-      defaultInstance.initFields();
-    }
-
-    // @@protoc_insertion_point(class_scope:ReadOpChecksumInfoProto)
-  }
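For context, the deleted setters and clearers above follow the standard protobuf-java builder pattern. A minimal sketch of how a caller would assemble this message — the class and method names are hypothetical, and buildPartial() is used because ChecksumProto's required fields are defined outside this hunk:

    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto;

    class ReadOpChecksumInfoExample {  // hypothetical, for illustration only
      static ReadOpChecksumInfoProto sample() {
        ChecksumProto checksum =
            ChecksumProto.newBuilder().buildPartial();  // placeholder; real code sets ChecksumProto's required fields
        return ReadOpChecksumInfoProto.newBuilder()
            .setChecksum(checksum)    // required .ChecksumProto checksum = 1
            .setChunkOffset(0L)       // required uint64 chunkOffset = 2
            .buildPartial();          // buildPartial() skips the required-field check that build() enforces
      }
    }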
-  public interface BlockOpResponseProtoOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
-
-    // required .Status status = 1;
-    boolean hasStatus();
-    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();
-
-    // optional string firstBadLink = 2;
-    boolean hasFirstBadLink();
-    String getFirstBadLink();
-
-    // optional .OpBlockChecksumResponseProto checksumResponse = 3;
-    boolean hasChecksumResponse();
-    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getChecksumResponse();
-    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder getChecksumResponseOrBuilder();
-
-    // optional .ReadOpChecksumInfoProto readOpChecksumInfo = 4;
-    boolean hasReadOpChecksumInfo();
-    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getReadOpChecksumInfo();
-    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder getReadOpChecksumInfoOrBuilder();
-
-    // optional string message = 5;
-    boolean hasMessage();
-    String getMessage();
-  }
-  public static final class BlockOpResponseProto extends
-      com.google.protobuf.GeneratedMessage
-      implements BlockOpResponseProtoOrBuilder {
-    // Use BlockOpResponseProto.newBuilder() to construct.
-    private BlockOpResponseProto(Builder builder) {
-      super(builder);
-    }
-    private BlockOpResponseProto(boolean noInit) {}
-
-    private static final BlockOpResponseProto defaultInstance;
-    public static BlockOpResponseProto getDefaultInstance() {
-      return defaultInstance;
-    }

[... generated descriptor lookups and field members/accessors (including the UTF-8 string caching for firstBadLink and message) elided ...]

-    private void initFields() {
-      status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
-      firstBadLink_ = "";
-      checksumResponse_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance();
-      readOpChecksumInfo_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance();
-      message_ = "";
-    }
-    private byte memoizedIsInitialized = -1;
-    public final boolean isInitialized() {
-      byte isInitialized = memoizedIsInitialized;
-      if (isInitialized != -1) return isInitialized == 1;
-
-      if (!hasStatus()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
-      if (hasChecksumResponse()) {
-        if (!getChecksumResponse().isInitialized()) {
-          memoizedIsInitialized = 0;
-          return false;
-        }
-      }
-      if (hasReadOpChecksumInfo()) {
-        if (!getReadOpChecksumInfo().isInitialized()) {
-          memoizedIsInitialized = 0;
-          return false;
-        }
-      }
-      memoizedIsInitialized = 1;
-      return true;
-    }

[... generated writeTo/getSerializedSize, writeReplace, equals/hashCode, the ten parseFrom/parseDelimitedFrom overloads, and the complete Builder implementation elided; mechanical protoc output deleted wholesale by this patch ...]

-    static {
-      defaultInstance = new BlockOpResponseProto(true);
-      defaultInstance.initFields();
-    }
-
-    // @@protoc_insertion_point(class_scope:BlockOpResponseProto)
-  }
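The parseFrom/parseDelimitedFrom overloads elided above are the usual deserialization entry points. A small round-trip sketch (class name and values hypothetical) using only methods present in the deleted code plus the standard toByteArray() inherited from the protobuf runtime:

    import com.google.protobuf.InvalidProtocolBufferException;
    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

    class BlockOpResponseExample {  // hypothetical, for illustration only
      static BlockOpResponseProto roundTrip() throws InvalidProtocolBufferException {
        BlockOpResponseProto resp = BlockOpResponseProto.newBuilder()
            .setStatus(Status.SUCCESS)  // required .Status status = 1
            .setMessage("read ok")      // optional string message = 5
            .build();                   // succeeds: the only required field is set
        byte[] wire = resp.toByteArray();             // inherited serialization helper
        return BlockOpResponseProto.parseFrom(wire);  // overload shown in the deleted code
      }
    }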
-  public interface ClientReadStatusProtoOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
-
-    // required .Status status = 1;
-    boolean hasStatus();
-    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();
-  }
-  public static final class ClientReadStatusProto extends
-      com.google.protobuf.GeneratedMessage
-      implements ClientReadStatusProtoOrBuilder {
-    // Use ClientReadStatusProto.newBuilder() to construct.
-
-    private void initFields() {
-      status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
-    }

[... generated constructors, descriptor lookups, status accessor, required-field isInitialized() check, writeTo/getSerializedSize, equals/hashCode, parseFrom/parseDelimitedFrom overloads, and the Builder elided; identical in shape to BlockOpResponseProto above, for a single required .Status field ...]

-    static {
-      defaultInstance = new ClientReadStatusProto(true);
-      defaultInstance.initFields();
-    }
-
-    // @@protoc_insertion_point(class_scope:ClientReadStatusProto)
-  }
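ClientReadStatusProto is a single-field ack, and its parseDelimitedFrom(InputStream) overload (deleted above) pairs with the runtime's writeDelimitedTo(OutputStream) for varint-length-prefixed framing over a stream. A sketch assuming some stream-based transport; the class and method names here are hypothetical:

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto;
    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

    class ReadStatusFraming {  // hypothetical, for illustration only
      static void send(OutputStream out) throws IOException {
        ClientReadStatusProto.newBuilder()
            .setStatus(Status.SUCCESS)  // required .Status status = 1
            .build()
            .writeDelimitedTo(out);     // varint length prefix, then the message body
      }
      static ClientReadStatusProto receive(InputStream in) throws IOException {
        return ClientReadStatusProto.parseDelimitedFrom(in);  // returns null at clean end-of-stream
      }
    }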
-  public interface DNTransferAckProtoOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
-
-    // required .Status status = 1;
-    boolean hasStatus();
-    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();
-  }
-  public static final class DNTransferAckProto extends
-      com.google.protobuf.GeneratedMessage
-      implements DNTransferAckProtoOrBuilder {
-    // Use DNTransferAckProto.newBuilder() to construct.
-
-    private void initFields() {
-      status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
-    }

[... generated constructors, descriptor lookups, status accessor, required-field isInitialized() check, writeTo/getSerializedSize, equals/hashCode, parseFrom/parseDelimitedFrom overloads, and the Builder elided; same single-required-.Status shape as ClientReadStatusProto above ...]

-    static {
-      defaultInstance = new DNTransferAckProto(true);
-      defaultInstance.initFields();
-    }
-
-    // @@protoc_insertion_point(class_scope:DNTransferAckProto)
-  }
- private OpBlockChecksumResponseProto(Builder builder) { - super(builder); - } - private OpBlockChecksumResponseProto(boolean noInit) {} - - private static final OpBlockChecksumResponseProto defaultInstance; - public static OpBlockChecksumResponseProto getDefaultInstance() { - return defaultInstance; - } - - public OpBlockChecksumResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpBlockChecksumResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpBlockChecksumResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required uint32 bytesPerCrc = 1; - public static final int BYTESPERCRC_FIELD_NUMBER = 1; - private int bytesPerCrc_; - public boolean hasBytesPerCrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public int getBytesPerCrc() { - return bytesPerCrc_; - } - - // required uint64 crcPerBlock = 2; - public static final int CRCPERBLOCK_FIELD_NUMBER = 2; - private long crcPerBlock_; - public boolean hasCrcPerBlock() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getCrcPerBlock() { - return crcPerBlock_; - } - - // required bytes md5 = 3; - public static final int MD5_FIELD_NUMBER = 3; - private com.google.protobuf.ByteString md5_; - public boolean hasMd5() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public com.google.protobuf.ByteString getMd5() { - return md5_; - } - - private void initFields() { - bytesPerCrc_ = 0; - crcPerBlock_ = 0L; - md5_ = com.google.protobuf.ByteString.EMPTY; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBytesPerCrc()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasCrcPerBlock()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasMd5()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt32(1, bytesPerCrc_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, crcPerBlock_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, md5_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(1, bytesPerCrc_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, crcPerBlock_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, md5_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws 
java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto) obj; - - boolean result = true; - result = result && (hasBytesPerCrc() == other.hasBytesPerCrc()); - if (hasBytesPerCrc()) { - result = result && (getBytesPerCrc() - == other.getBytesPerCrc()); - } - result = result && (hasCrcPerBlock() == other.hasCrcPerBlock()); - if (hasCrcPerBlock()) { - result = result && (getCrcPerBlock() - == other.getCrcPerBlock()); - } - result = result && (hasMd5() == other.hasMd5()); - if (hasMd5()) { - result = result && getMd5() - .equals(other.getMd5()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBytesPerCrc()) { - hash = (37 * hash) + BYTESPERCRC_FIELD_NUMBER; - hash = (53 * hash) + getBytesPerCrc(); - } - if (hasCrcPerBlock()) { - hash = (37 * hash) + CRCPERBLOCK_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getCrcPerBlock()); - } - if (hasMd5()) { - hash = (37 * hash) + MD5_FIELD_NUMBER; - hash = (53 * hash) + getMd5().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseDelimitedFrom(java.io.InputStream 
input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpBlockChecksumResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpBlockChecksumResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - bytesPerCrc_ = 0; - bitField0_ = (bitField0_ & ~0x00000001); - crcPerBlock_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - md5_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getDefaultInstanceForType() { - return 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.bytesPerCrc_ = bytesPerCrc_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.crcPerBlock_ = crcPerBlock_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.md5_ = md5_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance()) return this; - if (other.hasBytesPerCrc()) { - setBytesPerCrc(other.getBytesPerCrc()); - } - if (other.hasCrcPerBlock()) { - setCrcPerBlock(other.getCrcPerBlock()); - } - if (other.hasMd5()) { - setMd5(other.getMd5()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBytesPerCrc()) { - - return false; - } - if (!hasCrcPerBlock()) { - - return false; - } - if (!hasMd5()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - bytesPerCrc_ = input.readUInt32(); - break; - } - case 16: { - bitField0_ |= 0x00000002; 
- crcPerBlock_ = input.readUInt64(); - break; - } - case 26: { - bitField0_ |= 0x00000004; - md5_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required uint32 bytesPerCrc = 1; - private int bytesPerCrc_ ; - public boolean hasBytesPerCrc() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public int getBytesPerCrc() { - return bytesPerCrc_; - } - public Builder setBytesPerCrc(int value) { - bitField0_ |= 0x00000001; - bytesPerCrc_ = value; - onChanged(); - return this; - } - public Builder clearBytesPerCrc() { - bitField0_ = (bitField0_ & ~0x00000001); - bytesPerCrc_ = 0; - onChanged(); - return this; - } - - // required uint64 crcPerBlock = 2; - private long crcPerBlock_ ; - public boolean hasCrcPerBlock() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getCrcPerBlock() { - return crcPerBlock_; - } - public Builder setCrcPerBlock(long value) { - bitField0_ |= 0x00000002; - crcPerBlock_ = value; - onChanged(); - return this; - } - public Builder clearCrcPerBlock() { - bitField0_ = (bitField0_ & ~0x00000002); - crcPerBlock_ = 0L; - onChanged(); - return this; - } - - // required bytes md5 = 3; - private com.google.protobuf.ByteString md5_ = com.google.protobuf.ByteString.EMPTY; - public boolean hasMd5() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public com.google.protobuf.ByteString getMd5() { - return md5_; - } - public Builder setMd5(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - md5_ = value; - onChanged(); - return this; - } - public Builder clearMd5() { - bitField0_ = (bitField0_ & ~0x00000004); - md5_ = getDefaultInstance().getMd5(); - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:OpBlockChecksumResponseProto) - } - - static { - defaultInstance = new OpBlockChecksumResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:OpBlockChecksumResponseProto) - } - - private static com.google.protobuf.Descriptors.Descriptor - internal_static_BaseHeaderProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_BaseHeaderProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ClientOperationHeaderProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ClientOperationHeaderProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_OpReadBlockProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_OpReadBlockProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ChecksumProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ChecksumProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_OpWriteBlockProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_OpWriteBlockProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_OpTransferBlockProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_OpTransferBlockProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - 
internal_static_OpReplaceBlockProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_OpReplaceBlockProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_OpCopyBlockProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_OpCopyBlockProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_OpBlockChecksumProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_OpBlockChecksumProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_PacketHeaderProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_PacketHeaderProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_PipelineAckProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_PipelineAckProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ReadOpChecksumInfoProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ReadOpChecksumInfoProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_BlockOpResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_BlockOpResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ClientReadStatusProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ClientReadStatusProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_DNTransferAckProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_DNTransferAckProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_OpBlockChecksumResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_OpBlockChecksumResponseProto_fieldAccessorTable; - - public static com.google.protobuf.Descriptors.FileDescriptor - getDescriptor() { - return descriptor; - } - private static com.google.protobuf.Descriptors.FileDescriptor - descriptor; - static { - java.lang.String[] descriptorData = { - "\n\022datatransfer.proto\032\nhdfs.proto\"`\n\017Base" + - "HeaderProto\022\"\n\005block\030\001 \002(\0132\023.ExtendedBlo" + - "ckProto\022)\n\005token\030\002 \001(\0132\032.BlockTokenIdent" + - "ifierProto\"V\n\032ClientOperationHeaderProto" + - "\022$\n\nbaseHeader\030\001 \002(\0132\020.BaseHeaderProto\022\022" + - "\n\nclientName\030\002 \002(\t\"\\\n\020OpReadBlockProto\022+" + - "\n\006header\030\001 \002(\0132\033.ClientOperationHeaderPr" + - "oto\022\016\n\006offset\030\002 \002(\004\022\013\n\003len\030\003 \002(\004\"\205\001\n\rChe" + - "cksumProto\022)\n\004type\030\001 \002(\0162\033.ChecksumProto" + - ".ChecksumType\022\030\n\020bytesPerChecksum\030\002 \002(\r\"", - "/\n\014ChecksumType\022\010\n\004NULL\020\000\022\t\n\005CRC32\020\001\022\n\n\006" + - "CRC32C\020\002\"\332\004\n\021OpWriteBlockProto\022+\n\006header" + - "\030\001 \002(\0132\033.ClientOperationHeaderProto\022#\n\007t" + - 
"argets\030\002 \003(\0132\022.DatanodeInfoProto\022\"\n\006sour" + - "ce\030\003 \001(\0132\022.DatanodeInfoProto\0228\n\005stage\030\004 " + - "\002(\0162).OpWriteBlockProto.BlockConstructio" + - "nStage\022\024\n\014pipelineSize\030\005 \002(\r\022\024\n\014minBytes" + - "Rcvd\030\006 \002(\004\022\024\n\014maxBytesRcvd\030\007 \002(\004\022\035\n\025late" + - "stGenerationStamp\030\010 \002(\004\022)\n\021requestedChec" + - "ksum\030\t \002(\0132\016.ChecksumProto\"\210\002\n\026BlockCons", - "tructionStage\022\031\n\025PIPELINE_SETUP_APPEND\020\000" + - "\022\"\n\036PIPELINE_SETUP_APPEND_RECOVERY\020\001\022\022\n\016" + - "DATA_STREAMING\020\002\022%\n!PIPELINE_SETUP_STREA" + - "MING_RECOVERY\020\003\022\022\n\016PIPELINE_CLOSE\020\004\022\033\n\027P" + - "IPELINE_CLOSE_RECOVERY\020\005\022\031\n\025PIPELINE_SET" + - "UP_CREATE\020\006\022\020\n\014TRANSFER_RBW\020\007\022\026\n\022TRANSFE" + - "R_FINALIZED\020\010\"h\n\024OpTransferBlockProto\022+\n" + - "\006header\030\001 \002(\0132\033.ClientOperationHeaderPro" + - "to\022#\n\007targets\030\002 \003(\0132\022.DatanodeInfoProto\"" + - "l\n\023OpReplaceBlockProto\022 \n\006header\030\001 \002(\0132\020", - ".BaseHeaderProto\022\017\n\007delHint\030\002 \002(\t\022\"\n\006sou" + - "rce\030\003 \002(\0132\022.DatanodeInfoProto\"4\n\020OpCopyB" + - "lockProto\022 \n\006header\030\001 \002(\0132\020.BaseHeaderPr" + - "oto\"8\n\024OpBlockChecksumProto\022 \n\006header\030\001 " + - "\002(\0132\020.BaseHeaderProto\"e\n\021PacketHeaderPro" + - "to\022\025\n\roffsetInBlock\030\001 \002(\020\022\r\n\005seqno\030\002 \002(\020" + - "\022\031\n\021lastPacketInBlock\030\003 \002(\010\022\017\n\007dataLen\030\004" + - " \002(\017\":\n\020PipelineAckProto\022\r\n\005seqno\030\001 \002(\022\022" + - "\027\n\006status\030\002 \003(\0162\007.Status\"P\n\027ReadOpChecks" + - "umInfoProto\022 \n\010checksum\030\001 \002(\0132\016.Checksum", - "Proto\022\023\n\013chunkOffset\030\002 \002(\004\"\305\001\n\024BlockOpRe" + - "sponseProto\022\027\n\006status\030\001 \002(\0162\007.Status\022\024\n\014" + - "firstBadLink\030\002 \001(\t\0227\n\020checksumResponse\030\003" + - " \001(\0132\035.OpBlockChecksumResponseProto\0224\n\022r" + - "eadOpChecksumInfo\030\004 \001(\0132\030.ReadOpChecksum" + - "InfoProto\022\017\n\007message\030\005 \001(\t\"0\n\025ClientRead" + - "StatusProto\022\027\n\006status\030\001 \002(\0162\007.Status\"-\n\022" + - "DNTransferAckProto\022\027\n\006status\030\001 \002(\0162\007.Sta" + - "tus\"U\n\034OpBlockChecksumResponseProto\022\023\n\013b" + - "ytesPerCrc\030\001 \002(\r\022\023\n\013crcPerBlock\030\002 \002(\004\022\013\n", - "\003md5\030\003 \002(\014*\202\001\n\006Status\022\013\n\007SUCCESS\020\000\022\t\n\005ER" + - "ROR\020\001\022\022\n\016ERROR_CHECKSUM\020\002\022\021\n\rERROR_INVAL" + - "ID\020\003\022\020\n\014ERROR_EXISTS\020\004\022\026\n\022ERROR_ACCESS_T" + - "OKEN\020\005\022\017\n\013CHECKSUM_OK\020\006B>\n%org.apache.ha" + - "doop.hdfs.protocol.protoB\022DataTransferPr" + - "otos\240\001\001" - }; - com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = - new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { - public com.google.protobuf.ExtensionRegistry assignDescriptors( - com.google.protobuf.Descriptors.FileDescriptor root) { - descriptor = root; - internal_static_BaseHeaderProto_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_BaseHeaderProto_fieldAccessorTable = new - 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_BaseHeaderProto_descriptor, - new java.lang.String[] { "Block", "Token", }, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.class, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder.class); - internal_static_ClientOperationHeaderProto_descriptor = - getDescriptor().getMessageTypes().get(1); - internal_static_ClientOperationHeaderProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ClientOperationHeaderProto_descriptor, - new java.lang.String[] { "BaseHeader", "ClientName", }, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.class, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder.class); - internal_static_OpReadBlockProto_descriptor = - getDescriptor().getMessageTypes().get(2); - internal_static_OpReadBlockProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_OpReadBlockProto_descriptor, - new java.lang.String[] { "Header", "Offset", "Len", }, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.class, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.Builder.class); - internal_static_ChecksumProto_descriptor = - getDescriptor().getMessageTypes().get(3); - internal_static_ChecksumProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ChecksumProto_descriptor, - new java.lang.String[] { "Type", "BytesPerChecksum", }, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.class, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder.class); - internal_static_OpWriteBlockProto_descriptor = - getDescriptor().getMessageTypes().get(4); - internal_static_OpWriteBlockProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_OpWriteBlockProto_descriptor, - new java.lang.String[] { "Header", "Targets", "Source", "Stage", "PipelineSize", "MinBytesRcvd", "MaxBytesRcvd", "LatestGenerationStamp", "RequestedChecksum", }, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.class, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.Builder.class); - internal_static_OpTransferBlockProto_descriptor = - getDescriptor().getMessageTypes().get(5); - internal_static_OpTransferBlockProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_OpTransferBlockProto_descriptor, - new java.lang.String[] { "Header", "Targets", }, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.class, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.Builder.class); - internal_static_OpReplaceBlockProto_descriptor = - getDescriptor().getMessageTypes().get(6); - internal_static_OpReplaceBlockProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_OpReplaceBlockProto_descriptor, - new java.lang.String[] { "Header", "DelHint", "Source", }, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.class, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.Builder.class); - internal_static_OpCopyBlockProto_descriptor = - getDescriptor().getMessageTypes().get(7); - 
internal_static_OpCopyBlockProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_OpCopyBlockProto_descriptor, - new java.lang.String[] { "Header", }, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.class, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.Builder.class); - internal_static_OpBlockChecksumProto_descriptor = - getDescriptor().getMessageTypes().get(8); - internal_static_OpBlockChecksumProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_OpBlockChecksumProto_descriptor, - new java.lang.String[] { "Header", }, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.class, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.Builder.class); - internal_static_PacketHeaderProto_descriptor = - getDescriptor().getMessageTypes().get(9); - internal_static_PacketHeaderProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_PacketHeaderProto_descriptor, - new java.lang.String[] { "OffsetInBlock", "Seqno", "LastPacketInBlock", "DataLen", }, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.class, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.Builder.class); - internal_static_PipelineAckProto_descriptor = - getDescriptor().getMessageTypes().get(10); - internal_static_PipelineAckProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_PipelineAckProto_descriptor, - new java.lang.String[] { "Seqno", "Status", }, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.class, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.Builder.class); - internal_static_ReadOpChecksumInfoProto_descriptor = - getDescriptor().getMessageTypes().get(11); - internal_static_ReadOpChecksumInfoProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ReadOpChecksumInfoProto_descriptor, - new java.lang.String[] { "Checksum", "ChunkOffset", }, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.class, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder.class); - internal_static_BlockOpResponseProto_descriptor = - getDescriptor().getMessageTypes().get(12); - internal_static_BlockOpResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_BlockOpResponseProto_descriptor, - new java.lang.String[] { "Status", "FirstBadLink", "ChecksumResponse", "ReadOpChecksumInfo", "Message", }, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.Builder.class); - internal_static_ClientReadStatusProto_descriptor = - getDescriptor().getMessageTypes().get(13); - internal_static_ClientReadStatusProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ClientReadStatusProto_descriptor, - new java.lang.String[] { "Status", }, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.class, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.Builder.class); - internal_static_DNTransferAckProto_descriptor = - 
getDescriptor().getMessageTypes().get(14); - internal_static_DNTransferAckProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_DNTransferAckProto_descriptor, - new java.lang.String[] { "Status", }, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.class, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.Builder.class); - internal_static_OpBlockChecksumResponseProto_descriptor = - getDescriptor().getMessageTypes().get(15); - internal_static_OpBlockChecksumResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_OpBlockChecksumResponseProto_descriptor, - new java.lang.String[] { "BytesPerCrc", "CrcPerBlock", "Md5", }, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder.class); - return null; - } - }; - com.google.protobuf.Descriptors.FileDescriptor - .internalBuildGeneratedFileFrom(descriptorData, - new com.google.protobuf.Descriptors.FileDescriptor[] { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(), - }, assigner); - } - - // @@protoc_insertion_point(outer_class_scope) -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/DatanodeProtocolProtos.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/DatanodeProtocolProtos.java deleted file mode 100644 index 361e550851e..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/DatanodeProtocolProtos.java +++ /dev/null @@ -1,17233 +0,0 @@ -// Generated by the protocol buffer compiler. DO NOT EDIT! -// source: DatanodeProtocol.proto - -package org.apache.hadoop.hdfs.protocol.proto; - -public final class DatanodeProtocolProtos { - private DatanodeProtocolProtos() {} - public static void registerAllExtensions( - com.google.protobuf.ExtensionRegistry registry) { - } - public interface DatanodeRegistrationProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .DatanodeIDProto datanodeID = 1; - boolean hasDatanodeID(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeID(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDOrBuilder(); - - // required .StorageInfoProto storageInfo = 2; - boolean hasStorageInfo(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder(); - - // required .ExportedBlockKeysProto keys = 3; - boolean hasKeys(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getKeys(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder(); - } - public static final class DatanodeRegistrationProto extends - com.google.protobuf.GeneratedMessage - implements DatanodeRegistrationProtoOrBuilder { - // Use DatanodeRegistrationProto.newBuilder() to construct. 
- private DatanodeRegistrationProto(Builder builder) { - super(builder); - } - private DatanodeRegistrationProto(boolean noInit) {} - - private static final DatanodeRegistrationProto defaultInstance; - public static DatanodeRegistrationProto getDefaultInstance() { - return defaultInstance; - } - - public DatanodeRegistrationProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeRegistrationProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeRegistrationProto_fieldAccessorTable; - } - - private int bitField0_; - // required .DatanodeIDProto datanodeID = 1; - public static final int DATANODEID_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto datanodeID_; - public boolean hasDatanodeID() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeID() { - return datanodeID_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDOrBuilder() { - return datanodeID_; - } - - // required .StorageInfoProto storageInfo = 2; - public static final int STORAGEINFO_FIELD_NUMBER = 2; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_; - public boolean hasStorageInfo() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() { - return storageInfo_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() { - return storageInfo_; - } - - // required .ExportedBlockKeysProto keys = 3; - public static final int KEYS_FIELD_NUMBER = 3; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto keys_; - public boolean hasKeys() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getKeys() { - return keys_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() { - return keys_; - } - - private void initFields() { - datanodeID_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance(); - storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); - keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasDatanodeID()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasStorageInfo()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasKeys()) { - memoizedIsInitialized = 0; - return false; - } - if (!getDatanodeID().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - if (!getStorageInfo().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - if (!getKeys().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - 
public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, datanodeID_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, storageInfo_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeMessage(3, keys_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, datanodeID_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, storageInfo_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, keys_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto) obj; - - boolean result = true; - result = result && (hasDatanodeID() == other.hasDatanodeID()); - if (hasDatanodeID()) { - result = result && getDatanodeID() - .equals(other.getDatanodeID()); - } - result = result && (hasStorageInfo() == other.hasStorageInfo()); - if (hasStorageInfo()) { - result = result && getStorageInfo() - .equals(other.getStorageInfo()); - } - result = result && (hasKeys() == other.hasKeys()); - if (hasKeys()) { - result = result && getKeys() - .equals(other.getKeys()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasDatanodeID()) { - hash = (37 * hash) + DATANODEID_FIELD_NUMBER; - hash = (53 * hash) + getDatanodeID().hashCode(); - } - if (hasStorageInfo()) { - hash = (37 * hash) + STORAGEINFO_FIELD_NUMBER; - hash = (53 * hash) + getStorageInfo().hashCode(); - } - if (hasKeys()) { - hash = (37 * hash) + KEYS_FIELD_NUMBER; - hash = (53 * hash) + getKeys().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - 
.buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeRegistrationProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeRegistrationProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getDatanodeIDFieldBuilder(); - getStorageInfoFieldBuilder(); - getKeysFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (datanodeIDBuilder_ == null) { - datanodeID_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance(); - } else { - datanodeIDBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - if (storageInfoBuilder_ == null) { - storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); - } else { - storageInfoBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - if (keysBuilder_ == null) { - keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance(); - } else { - keysBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (datanodeIDBuilder_ == null) { - result.datanodeID_ = datanodeID_; - } else { - result.datanodeID_ = datanodeIDBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - if (storageInfoBuilder_ == null) { - result.storageInfo_ = storageInfo_; - } else { - result.storageInfo_ = 
storageInfoBuilder_.build(); - } - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - if (keysBuilder_ == null) { - result.keys_ = keys_; - } else { - result.keys_ = keysBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) return this; - if (other.hasDatanodeID()) { - mergeDatanodeID(other.getDatanodeID()); - } - if (other.hasStorageInfo()) { - mergeStorageInfo(other.getStorageInfo()); - } - if (other.hasKeys()) { - mergeKeys(other.getKeys()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasDatanodeID()) { - - return false; - } - if (!hasStorageInfo()) { - - return false; - } - if (!hasKeys()) { - - return false; - } - if (!getDatanodeID().isInitialized()) { - - return false; - } - if (!getStorageInfo().isInitialized()) { - - return false; - } - if (!getKeys().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder(); - if (hasDatanodeID()) { - subBuilder.mergeFrom(getDatanodeID()); - } - input.readMessage(subBuilder, extensionRegistry); - setDatanodeID(subBuilder.buildPartial()); - break; - } - case 18: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder(); - if (hasStorageInfo()) { - subBuilder.mergeFrom(getStorageInfo()); - } - input.readMessage(subBuilder, extensionRegistry); - setStorageInfo(subBuilder.buildPartial()); - break; - } - case 26: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.newBuilder(); - if (hasKeys()) { - subBuilder.mergeFrom(getKeys()); - } - input.readMessage(subBuilder, extensionRegistry); - setKeys(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .DatanodeIDProto datanodeID = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto datanodeID_ = 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> datanodeIDBuilder_; - public boolean hasDatanodeID() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeID() { - if (datanodeIDBuilder_ == null) { - return datanodeID_; - } else { - return datanodeIDBuilder_.getMessage(); - } - } - public Builder setDatanodeID(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { - if (datanodeIDBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - datanodeID_ = value; - onChanged(); - } else { - datanodeIDBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setDatanodeID( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) { - if (datanodeIDBuilder_ == null) { - datanodeID_ = builderForValue.build(); - onChanged(); - } else { - datanodeIDBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeDatanodeID(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { - if (datanodeIDBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - datanodeID_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()) { - datanodeID_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder(datanodeID_).mergeFrom(value).buildPartial(); - } else { - datanodeID_ = value; - } - onChanged(); - } else { - datanodeIDBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearDatanodeID() { - if (datanodeIDBuilder_ == null) { - datanodeID_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance(); - onChanged(); - } else { - datanodeIDBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getDatanodeIDBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getDatanodeIDFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDOrBuilder() { - if (datanodeIDBuilder_ != null) { - return datanodeIDBuilder_.getMessageOrBuilder(); - } else { - return datanodeID_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> - getDatanodeIDFieldBuilder() { - if (datanodeIDBuilder_ == null) { - datanodeIDBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>( - datanodeID_, - getParentForChildren(), - isClean()); - datanodeID_ = null; - } - return datanodeIDBuilder_; - } - - // required .StorageInfoProto storageInfo = 2; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto 
storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> storageInfoBuilder_; - public boolean hasStorageInfo() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() { - if (storageInfoBuilder_ == null) { - return storageInfo_; - } else { - return storageInfoBuilder_.getMessage(); - } - } - public Builder setStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) { - if (storageInfoBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - storageInfo_ = value; - onChanged(); - } else { - storageInfoBuilder_.setMessage(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder setStorageInfo( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder builderForValue) { - if (storageInfoBuilder_ == null) { - storageInfo_ = builderForValue.build(); - onChanged(); - } else { - storageInfoBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder mergeStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) { - if (storageInfoBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && - storageInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance()) { - storageInfo_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder(storageInfo_).mergeFrom(value).buildPartial(); - } else { - storageInfo_ = value; - } - onChanged(); - } else { - storageInfoBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder clearStorageInfo() { - if (storageInfoBuilder_ == null) { - storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); - onChanged(); - } else { - storageInfoBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder getStorageInfoBuilder() { - bitField0_ |= 0x00000002; - onChanged(); - return getStorageInfoFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() { - if (storageInfoBuilder_ != null) { - return storageInfoBuilder_.getMessageOrBuilder(); - } else { - return storageInfo_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> - getStorageInfoFieldBuilder() { - if (storageInfoBuilder_ == null) { - storageInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder>( - storageInfo_, - getParentForChildren(), - isClean()); - storageInfo_ = null; - } - return storageInfoBuilder_; - } - - // required .ExportedBlockKeysProto keys = 3; - 
private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder> keysBuilder_; - public boolean hasKeys() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getKeys() { - if (keysBuilder_ == null) { - return keys_; - } else { - return keysBuilder_.getMessage(); - } - } - public Builder setKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto value) { - if (keysBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - keys_ = value; - onChanged(); - } else { - keysBuilder_.setMessage(value); - } - bitField0_ |= 0x00000004; - return this; - } - public Builder setKeys( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder builderForValue) { - if (keysBuilder_ == null) { - keys_ = builderForValue.build(); - onChanged(); - } else { - keysBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000004; - return this; - } - public Builder mergeKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto value) { - if (keysBuilder_ == null) { - if (((bitField0_ & 0x00000004) == 0x00000004) && - keys_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance()) { - keys_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.newBuilder(keys_).mergeFrom(value).buildPartial(); - } else { - keys_ = value; - } - onChanged(); - } else { - keysBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000004; - return this; - } - public Builder clearKeys() { - if (keysBuilder_ == null) { - keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance(); - onChanged(); - } else { - keysBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder getKeysBuilder() { - bitField0_ |= 0x00000004; - onChanged(); - return getKeysFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() { - if (keysBuilder_ != null) { - return keysBuilder_.getMessageOrBuilder(); - } else { - return keys_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder> - getKeysFieldBuilder() { - if (keysBuilder_ == null) { - keysBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder>( - keys_, - getParentForChildren(), - isClean()); - keys_ = null; - } - return keysBuilder_; - } - - // @@protoc_insertion_point(builder_scope:DatanodeRegistrationProto) - } - - static { - defaultInstance = new 
DatanodeRegistrationProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:DatanodeRegistrationProto) - } - - public interface DatanodeCommandProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .DatanodeCommandProto.Type cmdType = 1; - boolean hasCmdType(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type getCmdType(); - - // optional .BalancerBandwidthCommandProto balancerCmd = 2; - boolean hasBalancerCmd(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto getBalancerCmd(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder getBalancerCmdOrBuilder(); - - // optional .BlockCommandProto blkCmd = 3; - boolean hasBlkCmd(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto getBlkCmd(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder getBlkCmdOrBuilder(); - - // optional .BlockRecoveryCommandProto recoveryCmd = 4; - boolean hasRecoveryCmd(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto getRecoveryCmd(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder getRecoveryCmdOrBuilder(); - - // optional .FinalizeCommandProto finalizeCmd = 5; - boolean hasFinalizeCmd(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto getFinalizeCmd(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder getFinalizeCmdOrBuilder(); - - // optional .KeyUpdateCommandProto keyUpdateCmd = 6; - boolean hasKeyUpdateCmd(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto getKeyUpdateCmd(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder getKeyUpdateCmdOrBuilder(); - - // optional .RegisterCommandProto registerCmd = 7; - boolean hasRegisterCmd(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto getRegisterCmd(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder getRegisterCmdOrBuilder(); - - // optional .UpgradeCommandProto upgradeCmd = 8; - boolean hasUpgradeCmd(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getUpgradeCmd(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getUpgradeCmdOrBuilder(); - } - public static final class DatanodeCommandProto extends - com.google.protobuf.GeneratedMessage - implements DatanodeCommandProtoOrBuilder { - // Use DatanodeCommandProto.newBuilder() to construct. 
- private DatanodeCommandProto(Builder builder) { - super(builder); - } - private DatanodeCommandProto(boolean noInit) {} - - private static final DatanodeCommandProto defaultInstance; - public static DatanodeCommandProto getDefaultInstance() { - return defaultInstance; - } - - public DatanodeCommandProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeCommandProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeCommandProto_fieldAccessorTable; - } - - public enum Type - implements com.google.protobuf.ProtocolMessageEnum { - BalancerBandwidthCommand(0, 0), - BlockCommand(1, 1), - BlockRecoveryCommand(2, 2), - FinalizeCommand(3, 3), - KeyUpdateCommand(4, 4), - RegisterCommand(5, 5), - UpgradeCommand(6, 6), - ; - - public static final int BalancerBandwidthCommand_VALUE = 0; - public static final int BlockCommand_VALUE = 1; - public static final int BlockRecoveryCommand_VALUE = 2; - public static final int FinalizeCommand_VALUE = 3; - public static final int KeyUpdateCommand_VALUE = 4; - public static final int RegisterCommand_VALUE = 5; - public static final int UpgradeCommand_VALUE = 6; - - - public final int getNumber() { return value; } - - public static Type valueOf(int value) { - switch (value) { - case 0: return BalancerBandwidthCommand; - case 1: return BlockCommand; - case 2: return BlockRecoveryCommand; - case 3: return FinalizeCommand; - case 4: return KeyUpdateCommand; - case 5: return RegisterCommand; - case 6: return UpgradeCommand; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap<Type> - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap<Type> - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap<Type>() { - public Type findValueByNumber(int number) { - return Type.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDescriptor().getEnumTypes().get(0); - } - - private static final Type[] VALUES = { - BalancerBandwidthCommand, BlockCommand, BlockRecoveryCommand, FinalizeCommand, KeyUpdateCommand, RegisterCommand, UpgradeCommand, - }; - - public static Type valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private Type(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:DatanodeCommandProto.Type) - } - - private int bitField0_; - // required .DatanodeCommandProto.Type cmdType = 1; - public static final int CMDTYPE_FIELD_NUMBER = 1; - private
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type cmdType_; - public boolean hasCmdType() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type getCmdType() { - return cmdType_; - } - - // optional .BalancerBandwidthCommandProto balancerCmd = 2; - public static final int BALANCERCMD_FIELD_NUMBER = 2; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto balancerCmd_; - public boolean hasBalancerCmd() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto getBalancerCmd() { - return balancerCmd_; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder getBalancerCmdOrBuilder() { - return balancerCmd_; - } - - // optional .BlockCommandProto blkCmd = 3; - public static final int BLKCMD_FIELD_NUMBER = 3; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto blkCmd_; - public boolean hasBlkCmd() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto getBlkCmd() { - return blkCmd_; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder getBlkCmdOrBuilder() { - return blkCmd_; - } - - // optional .BlockRecoveryCommandProto recoveryCmd = 4; - public static final int RECOVERYCMD_FIELD_NUMBER = 4; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto recoveryCmd_; - public boolean hasRecoveryCmd() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto getRecoveryCmd() { - return recoveryCmd_; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder getRecoveryCmdOrBuilder() { - return recoveryCmd_; - } - - // optional .FinalizeCommandProto finalizeCmd = 5; - public static final int FINALIZECMD_FIELD_NUMBER = 5; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto finalizeCmd_; - public boolean hasFinalizeCmd() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto getFinalizeCmd() { - return finalizeCmd_; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder getFinalizeCmdOrBuilder() { - return finalizeCmd_; - } - - // optional .KeyUpdateCommandProto keyUpdateCmd = 6; - public static final int KEYUPDATECMD_FIELD_NUMBER = 6; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto keyUpdateCmd_; - public boolean hasKeyUpdateCmd() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto getKeyUpdateCmd() { - return keyUpdateCmd_; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder getKeyUpdateCmdOrBuilder() { - return keyUpdateCmd_; - } - - // optional .RegisterCommandProto registerCmd = 7; - public static final int REGISTERCMD_FIELD_NUMBER = 7; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto 
registerCmd_; - public boolean hasRegisterCmd() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto getRegisterCmd() { - return registerCmd_; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder getRegisterCmdOrBuilder() { - return registerCmd_; - } - - // optional .UpgradeCommandProto upgradeCmd = 8; - public static final int UPGRADECMD_FIELD_NUMBER = 8; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto upgradeCmd_; - public boolean hasUpgradeCmd() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getUpgradeCmd() { - return upgradeCmd_; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getUpgradeCmdOrBuilder() { - return upgradeCmd_; - } - - private void initFields() { - cmdType_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand; - balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance(); - blkCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance(); - recoveryCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance(); - finalizeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance(); - keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance(); - registerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance(); - upgradeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasCmdType()) { - memoizedIsInitialized = 0; - return false; - } - if (hasBalancerCmd()) { - if (!getBalancerCmd().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - if (hasBlkCmd()) { - if (!getBlkCmd().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - if (hasRecoveryCmd()) { - if (!getRecoveryCmd().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - if (hasFinalizeCmd()) { - if (!getFinalizeCmd().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - if (hasKeyUpdateCmd()) { - if (!getKeyUpdateCmd().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - if (hasUpgradeCmd()) { - if (!getUpgradeCmd().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(1, cmdType_.getNumber()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, balancerCmd_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeMessage(3, blkCmd_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeMessage(4, recoveryCmd_); - } - if (((bitField0_ & 
0x00000010) == 0x00000010)) { - output.writeMessage(5, finalizeCmd_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeMessage(6, keyUpdateCmd_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - output.writeMessage(7, registerCmd_); - } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - output.writeMessage(8, upgradeCmd_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, cmdType_.getNumber()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, balancerCmd_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, blkCmd_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, recoveryCmd_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(5, finalizeCmd_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(6, keyUpdateCmd_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(7, registerCmd_); - } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(8, upgradeCmd_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto) obj; - - boolean result = true; - result = result && (hasCmdType() == other.hasCmdType()); - if (hasCmdType()) { - result = result && - (getCmdType() == other.getCmdType()); - } - result = result && (hasBalancerCmd() == other.hasBalancerCmd()); - if (hasBalancerCmd()) { - result = result && getBalancerCmd() - .equals(other.getBalancerCmd()); - } - result = result && (hasBlkCmd() == other.hasBlkCmd()); - if (hasBlkCmd()) { - result = result && getBlkCmd() - .equals(other.getBlkCmd()); - } - result = result && (hasRecoveryCmd() == other.hasRecoveryCmd()); - if (hasRecoveryCmd()) { - result = result && getRecoveryCmd() - .equals(other.getRecoveryCmd()); - } - result = result && (hasFinalizeCmd() == other.hasFinalizeCmd()); - if (hasFinalizeCmd()) { - result = result && getFinalizeCmd() - .equals(other.getFinalizeCmd()); - } - result = result && (hasKeyUpdateCmd() == other.hasKeyUpdateCmd()); - if (hasKeyUpdateCmd()) { - result = result && getKeyUpdateCmd() - .equals(other.getKeyUpdateCmd()); - } - result = result && (hasRegisterCmd() == other.hasRegisterCmd()); - if (hasRegisterCmd()) { - result = 
result && getRegisterCmd() - .equals(other.getRegisterCmd()); - } - result = result && (hasUpgradeCmd() == other.hasUpgradeCmd()); - if (hasUpgradeCmd()) { - result = result && getUpgradeCmd() - .equals(other.getUpgradeCmd()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasCmdType()) { - hash = (37 * hash) + CMDTYPE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getCmdType()); - } - if (hasBalancerCmd()) { - hash = (37 * hash) + BALANCERCMD_FIELD_NUMBER; - hash = (53 * hash) + getBalancerCmd().hashCode(); - } - if (hasBlkCmd()) { - hash = (37 * hash) + BLKCMD_FIELD_NUMBER; - hash = (53 * hash) + getBlkCmd().hashCode(); - } - if (hasRecoveryCmd()) { - hash = (37 * hash) + RECOVERYCMD_FIELD_NUMBER; - hash = (53 * hash) + getRecoveryCmd().hashCode(); - } - if (hasFinalizeCmd()) { - hash = (37 * hash) + FINALIZECMD_FIELD_NUMBER; - hash = (53 * hash) + getFinalizeCmd().hashCode(); - } - if (hasKeyUpdateCmd()) { - hash = (37 * hash) + KEYUPDATECMD_FIELD_NUMBER; - hash = (53 * hash) + getKeyUpdateCmd().hashCode(); - } - if (hasRegisterCmd()) { - hash = (37 * hash) + REGISTERCMD_FIELD_NUMBER; - hash = (53 * hash) + getRegisterCmd().hashCode(); - } - if (hasUpgradeCmd()) { - hash = (37 * hash) + UPGRADECMD_FIELD_NUMBER; - hash = (53 * hash) + getUpgradeCmd().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); 
- } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeCommandProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeCommandProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBalancerCmdFieldBuilder(); - getBlkCmdFieldBuilder(); - getRecoveryCmdFieldBuilder(); - getFinalizeCmdFieldBuilder(); - getKeyUpdateCmdFieldBuilder(); - getRegisterCmdFieldBuilder(); - getUpgradeCmdFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - cmdType_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand; - bitField0_ = (bitField0_ & ~0x00000001); - if (balancerCmdBuilder_ == null) { - balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance(); - } else { - balancerCmdBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - if (blkCmdBuilder_ == null) { - blkCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance(); - } else { - blkCmdBuilder_.clear();
- } - bitField0_ = (bitField0_ & ~0x00000004); - if (recoveryCmdBuilder_ == null) { - recoveryCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance(); - } else { - recoveryCmdBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000008); - if (finalizeCmdBuilder_ == null) { - finalizeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance(); - } else { - finalizeCmdBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000010); - if (keyUpdateCmdBuilder_ == null) { - keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance(); - } else { - keyUpdateCmdBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000020); - if (registerCmdBuilder_ == null) { - registerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance(); - } else { - registerCmdBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000040); - if (upgradeCmdBuilder_ == null) { - upgradeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance(); - } else { - upgradeCmdBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000080); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.cmdType_ = cmdType_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - if (balancerCmdBuilder_ == null) { - result.balancerCmd_ = balancerCmd_; - } else { - result.balancerCmd_ = balancerCmdBuilder_.build(); - } - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - if (blkCmdBuilder_ == null) { - result.blkCmd_ = blkCmd_; - } else { - result.blkCmd_ = blkCmdBuilder_.build(); - } - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ 
|= 0x00000008; - } - if (recoveryCmdBuilder_ == null) { - result.recoveryCmd_ = recoveryCmd_; - } else { - result.recoveryCmd_ = recoveryCmdBuilder_.build(); - } - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - if (finalizeCmdBuilder_ == null) { - result.finalizeCmd_ = finalizeCmd_; - } else { - result.finalizeCmd_ = finalizeCmdBuilder_.build(); - } - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; - } - if (keyUpdateCmdBuilder_ == null) { - result.keyUpdateCmd_ = keyUpdateCmd_; - } else { - result.keyUpdateCmd_ = keyUpdateCmdBuilder_.build(); - } - if (((from_bitField0_ & 0x00000040) == 0x00000040)) { - to_bitField0_ |= 0x00000040; - } - if (registerCmdBuilder_ == null) { - result.registerCmd_ = registerCmd_; - } else { - result.registerCmd_ = registerCmdBuilder_.build(); - } - if (((from_bitField0_ & 0x00000080) == 0x00000080)) { - to_bitField0_ |= 0x00000080; - } - if (upgradeCmdBuilder_ == null) { - result.upgradeCmd_ = upgradeCmd_; - } else { - result.upgradeCmd_ = upgradeCmdBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance()) return this; - if (other.hasCmdType()) { - setCmdType(other.getCmdType()); - } - if (other.hasBalancerCmd()) { - mergeBalancerCmd(other.getBalancerCmd()); - } - if (other.hasBlkCmd()) { - mergeBlkCmd(other.getBlkCmd()); - } - if (other.hasRecoveryCmd()) { - mergeRecoveryCmd(other.getRecoveryCmd()); - } - if (other.hasFinalizeCmd()) { - mergeFinalizeCmd(other.getFinalizeCmd()); - } - if (other.hasKeyUpdateCmd()) { - mergeKeyUpdateCmd(other.getKeyUpdateCmd()); - } - if (other.hasRegisterCmd()) { - mergeRegisterCmd(other.getRegisterCmd()); - } - if (other.hasUpgradeCmd()) { - mergeUpgradeCmd(other.getUpgradeCmd()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasCmdType()) { - - return false; - } - if (hasBalancerCmd()) { - if (!getBalancerCmd().isInitialized()) { - - return false; - } - } - if (hasBlkCmd()) { - if (!getBlkCmd().isInitialized()) { - - return false; - } - } - if (hasRecoveryCmd()) { - if (!getRecoveryCmd().isInitialized()) { - - return false; - } - } - if (hasFinalizeCmd()) { - if (!getFinalizeCmd().isInitialized()) { - - return false; - } - } - if (hasKeyUpdateCmd()) { - if (!getKeyUpdateCmd().isInitialized()) { - - return false; - } - } - if (hasUpgradeCmd()) { - if (!getUpgradeCmd().isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - 
onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - int rawValue = input.readEnum(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type value = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - bitField0_ |= 0x00000001; - cmdType_ = value; - } - break; - } - case 18: { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.newBuilder(); - if (hasBalancerCmd()) { - subBuilder.mergeFrom(getBalancerCmd()); - } - input.readMessage(subBuilder, extensionRegistry); - setBalancerCmd(subBuilder.buildPartial()); - break; - } - case 26: { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.newBuilder(); - if (hasBlkCmd()) { - subBuilder.mergeFrom(getBlkCmd()); - } - input.readMessage(subBuilder, extensionRegistry); - setBlkCmd(subBuilder.buildPartial()); - break; - } - case 34: { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.newBuilder(); - if (hasRecoveryCmd()) { - subBuilder.mergeFrom(getRecoveryCmd()); - } - input.readMessage(subBuilder, extensionRegistry); - setRecoveryCmd(subBuilder.buildPartial()); - break; - } - case 42: { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.newBuilder(); - if (hasFinalizeCmd()) { - subBuilder.mergeFrom(getFinalizeCmd()); - } - input.readMessage(subBuilder, extensionRegistry); - setFinalizeCmd(subBuilder.buildPartial()); - break; - } - case 50: { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.newBuilder(); - if (hasKeyUpdateCmd()) { - subBuilder.mergeFrom(getKeyUpdateCmd()); - } - input.readMessage(subBuilder, extensionRegistry); - setKeyUpdateCmd(subBuilder.buildPartial()); - break; - } - case 58: { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.newBuilder(); - if (hasRegisterCmd()) { - subBuilder.mergeFrom(getRegisterCmd()); - } - input.readMessage(subBuilder, extensionRegistry); - setRegisterCmd(subBuilder.buildPartial()); - break; - } - case 66: { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.newBuilder(); - if (hasUpgradeCmd()) { - subBuilder.mergeFrom(getUpgradeCmd()); - } - input.readMessage(subBuilder, extensionRegistry); - setUpgradeCmd(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .DatanodeCommandProto.Type cmdType = 1; - private 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type cmdType_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand; - public boolean hasCmdType() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type getCmdType() { - return cmdType_; - } - public Builder setCmdType(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - cmdType_ = value; - onChanged(); - return this; - } - public Builder clearCmdType() { - bitField0_ = (bitField0_ & ~0x00000001); - cmdType_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand; - onChanged(); - return this; - } - - // optional .BalancerBandwidthCommandProto balancerCmd = 2; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder> balancerCmdBuilder_; - public boolean hasBalancerCmd() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto getBalancerCmd() { - if (balancerCmdBuilder_ == null) { - return balancerCmd_; - } else { - return balancerCmdBuilder_.getMessage(); - } - } - public Builder setBalancerCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto value) { - if (balancerCmdBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - balancerCmd_ = value; - onChanged(); - } else { - balancerCmdBuilder_.setMessage(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder setBalancerCmd( - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder builderForValue) { - if (balancerCmdBuilder_ == null) { - balancerCmd_ = builderForValue.build(); - onChanged(); - } else { - balancerCmdBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder mergeBalancerCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto value) { - if (balancerCmdBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && - balancerCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance()) { - balancerCmd_ = - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.newBuilder(balancerCmd_).mergeFrom(value).buildPartial(); - } else { - balancerCmd_ = value; - } - onChanged(); - } else { - balancerCmdBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder clearBalancerCmd() { - if (balancerCmdBuilder_ == null) { - balancerCmd_ = 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance(); - onChanged(); - } else { - balancerCmdBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder getBalancerCmdBuilder() { - bitField0_ |= 0x00000002; - onChanged(); - return getBalancerCmdFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder getBalancerCmdOrBuilder() { - if (balancerCmdBuilder_ != null) { - return balancerCmdBuilder_.getMessageOrBuilder(); - } else { - return balancerCmd_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder> - getBalancerCmdFieldBuilder() { - if (balancerCmdBuilder_ == null) { - balancerCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder>( - balancerCmd_, - getParentForChildren(), - isClean()); - balancerCmd_ = null; - } - return balancerCmdBuilder_; - } - - // optional .BlockCommandProto blkCmd = 3; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto blkCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder> blkCmdBuilder_; - public boolean hasBlkCmd() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto getBlkCmd() { - if (blkCmdBuilder_ == null) { - return blkCmd_; - } else { - return blkCmdBuilder_.getMessage(); - } - } - public Builder setBlkCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto value) { - if (blkCmdBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - blkCmd_ = value; - onChanged(); - } else { - blkCmdBuilder_.setMessage(value); - } - bitField0_ |= 0x00000004; - return this; - } - public Builder setBlkCmd( - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder builderForValue) { - if (blkCmdBuilder_ == null) { - blkCmd_ = builderForValue.build(); - onChanged(); - } else { - blkCmdBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000004; - return this; - } - public Builder mergeBlkCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto value) { - if (blkCmdBuilder_ == null) { - if (((bitField0_ & 0x00000004) == 0x00000004) && - blkCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance()) { - blkCmd_ = - 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.newBuilder(blkCmd_).mergeFrom(value).buildPartial(); - } else { - blkCmd_ = value; - } - onChanged(); - } else { - blkCmdBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000004; - return this; - } - public Builder clearBlkCmd() { - if (blkCmdBuilder_ == null) { - blkCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance(); - onChanged(); - } else { - blkCmdBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder getBlkCmdBuilder() { - bitField0_ |= 0x00000004; - onChanged(); - return getBlkCmdFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder getBlkCmdOrBuilder() { - if (blkCmdBuilder_ != null) { - return blkCmdBuilder_.getMessageOrBuilder(); - } else { - return blkCmd_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder> - getBlkCmdFieldBuilder() { - if (blkCmdBuilder_ == null) { - blkCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder>( - blkCmd_, - getParentForChildren(), - isClean()); - blkCmd_ = null; - } - return blkCmdBuilder_; - } - - // optional .BlockRecoveryCommandProto recoveryCmd = 4; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto recoveryCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder> recoveryCmdBuilder_; - public boolean hasRecoveryCmd() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto getRecoveryCmd() { - if (recoveryCmdBuilder_ == null) { - return recoveryCmd_; - } else { - return recoveryCmdBuilder_.getMessage(); - } - } - public Builder setRecoveryCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto value) { - if (recoveryCmdBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - recoveryCmd_ = value; - onChanged(); - } else { - recoveryCmdBuilder_.setMessage(value); - } - bitField0_ |= 0x00000008; - return this; - } - public Builder setRecoveryCmd( - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder builderForValue) { - if (recoveryCmdBuilder_ == null) { - recoveryCmd_ = builderForValue.build(); - onChanged(); - } else { - recoveryCmdBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000008; - return this; - } - public Builder 
mergeRecoveryCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto value) { - if (recoveryCmdBuilder_ == null) { - if (((bitField0_ & 0x00000008) == 0x00000008) && - recoveryCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance()) { - recoveryCmd_ = - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.newBuilder(recoveryCmd_).mergeFrom(value).buildPartial(); - } else { - recoveryCmd_ = value; - } - onChanged(); - } else { - recoveryCmdBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000008; - return this; - } - public Builder clearRecoveryCmd() { - if (recoveryCmdBuilder_ == null) { - recoveryCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance(); - onChanged(); - } else { - recoveryCmdBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000008); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder getRecoveryCmdBuilder() { - bitField0_ |= 0x00000008; - onChanged(); - return getRecoveryCmdFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder getRecoveryCmdOrBuilder() { - if (recoveryCmdBuilder_ != null) { - return recoveryCmdBuilder_.getMessageOrBuilder(); - } else { - return recoveryCmd_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder> - getRecoveryCmdFieldBuilder() { - if (recoveryCmdBuilder_ == null) { - recoveryCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder>( - recoveryCmd_, - getParentForChildren(), - isClean()); - recoveryCmd_ = null; - } - return recoveryCmdBuilder_; - } - - // optional .FinalizeCommandProto finalizeCmd = 5; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto finalizeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder> finalizeCmdBuilder_; - public boolean hasFinalizeCmd() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto getFinalizeCmd() { - if (finalizeCmdBuilder_ == null) { - return finalizeCmd_; - } else { - return finalizeCmdBuilder_.getMessage(); - } - } - public Builder setFinalizeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto value) { - if (finalizeCmdBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - finalizeCmd_ = value; - onChanged(); - } else { - 
finalizeCmdBuilder_.setMessage(value); - } - bitField0_ |= 0x00000010; - return this; - } - public Builder setFinalizeCmd( - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder builderForValue) { - if (finalizeCmdBuilder_ == null) { - finalizeCmd_ = builderForValue.build(); - onChanged(); - } else { - finalizeCmdBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000010; - return this; - } - public Builder mergeFinalizeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto value) { - if (finalizeCmdBuilder_ == null) { - if (((bitField0_ & 0x00000010) == 0x00000010) && - finalizeCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance()) { - finalizeCmd_ = - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.newBuilder(finalizeCmd_).mergeFrom(value).buildPartial(); - } else { - finalizeCmd_ = value; - } - onChanged(); - } else { - finalizeCmdBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000010; - return this; - } - public Builder clearFinalizeCmd() { - if (finalizeCmdBuilder_ == null) { - finalizeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance(); - onChanged(); - } else { - finalizeCmdBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000010); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder getFinalizeCmdBuilder() { - bitField0_ |= 0x00000010; - onChanged(); - return getFinalizeCmdFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder getFinalizeCmdOrBuilder() { - if (finalizeCmdBuilder_ != null) { - return finalizeCmdBuilder_.getMessageOrBuilder(); - } else { - return finalizeCmd_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder> - getFinalizeCmdFieldBuilder() { - if (finalizeCmdBuilder_ == null) { - finalizeCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder>( - finalizeCmd_, - getParentForChildren(), - isClean()); - finalizeCmd_ = null; - } - return finalizeCmdBuilder_; - } - - // optional .KeyUpdateCommandProto keyUpdateCmd = 6; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder> keyUpdateCmdBuilder_; - public boolean hasKeyUpdateCmd() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto 
getKeyUpdateCmd() { - if (keyUpdateCmdBuilder_ == null) { - return keyUpdateCmd_; - } else { - return keyUpdateCmdBuilder_.getMessage(); - } - } - public Builder setKeyUpdateCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto value) { - if (keyUpdateCmdBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - keyUpdateCmd_ = value; - onChanged(); - } else { - keyUpdateCmdBuilder_.setMessage(value); - } - bitField0_ |= 0x00000020; - return this; - } - public Builder setKeyUpdateCmd( - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder builderForValue) { - if (keyUpdateCmdBuilder_ == null) { - keyUpdateCmd_ = builderForValue.build(); - onChanged(); - } else { - keyUpdateCmdBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000020; - return this; - } - public Builder mergeKeyUpdateCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto value) { - if (keyUpdateCmdBuilder_ == null) { - if (((bitField0_ & 0x00000020) == 0x00000020) && - keyUpdateCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance()) { - keyUpdateCmd_ = - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.newBuilder(keyUpdateCmd_).mergeFrom(value).buildPartial(); - } else { - keyUpdateCmd_ = value; - } - onChanged(); - } else { - keyUpdateCmdBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000020; - return this; - } - public Builder clearKeyUpdateCmd() { - if (keyUpdateCmdBuilder_ == null) { - keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance(); - onChanged(); - } else { - keyUpdateCmdBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000020); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder getKeyUpdateCmdBuilder() { - bitField0_ |= 0x00000020; - onChanged(); - return getKeyUpdateCmdFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder getKeyUpdateCmdOrBuilder() { - if (keyUpdateCmdBuilder_ != null) { - return keyUpdateCmdBuilder_.getMessageOrBuilder(); - } else { - return keyUpdateCmd_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder> - getKeyUpdateCmdFieldBuilder() { - if (keyUpdateCmdBuilder_ == null) { - keyUpdateCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder>( - keyUpdateCmd_, - getParentForChildren(), - isClean()); - keyUpdateCmd_ = null; - } - return keyUpdateCmdBuilder_; - } - - // optional .RegisterCommandProto registerCmd = 7; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto registerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder> registerCmdBuilder_; - public boolean hasRegisterCmd() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto getRegisterCmd() { - if (registerCmdBuilder_ == null) { - return registerCmd_; - } else { - return registerCmdBuilder_.getMessage(); - } - } - public Builder setRegisterCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto value) { - if (registerCmdBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - registerCmd_ = value; - onChanged(); - } else { - registerCmdBuilder_.setMessage(value); - } - bitField0_ |= 0x00000040; - return this; - } - public Builder setRegisterCmd( - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder builderForValue) { - if (registerCmdBuilder_ == null) { - registerCmd_ = builderForValue.build(); - onChanged(); - } else { - registerCmdBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000040; - return this; - } - public Builder mergeRegisterCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto value) { - if (registerCmdBuilder_ == null) { - if (((bitField0_ & 0x00000040) == 0x00000040) && - registerCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance()) { - registerCmd_ = - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.newBuilder(registerCmd_).mergeFrom(value).buildPartial(); - } else { - registerCmd_ = value; - } - onChanged(); - } else { - registerCmdBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000040; - return this; - } - public Builder clearRegisterCmd() { - if (registerCmdBuilder_ == null) { - registerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance(); - onChanged(); - } else { - registerCmdBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000040); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder getRegisterCmdBuilder() { - bitField0_ |= 0x00000040; - onChanged(); - return getRegisterCmdFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder getRegisterCmdOrBuilder() { - if (registerCmdBuilder_ != null) { - return registerCmdBuilder_.getMessageOrBuilder(); - } else { - return registerCmd_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder> - getRegisterCmdFieldBuilder() { - if (registerCmdBuilder_ == null) { - registerCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder>( - registerCmd_, - 
getParentForChildren(), - isClean()); - registerCmd_ = null; - } - return registerCmdBuilder_; - } - - // optional .UpgradeCommandProto upgradeCmd = 8; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto upgradeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder> upgradeCmdBuilder_; - public boolean hasUpgradeCmd() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getUpgradeCmd() { - if (upgradeCmdBuilder_ == null) { - return upgradeCmd_; - } else { - return upgradeCmdBuilder_.getMessage(); - } - } - public Builder setUpgradeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto value) { - if (upgradeCmdBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - upgradeCmd_ = value; - onChanged(); - } else { - upgradeCmdBuilder_.setMessage(value); - } - bitField0_ |= 0x00000080; - return this; - } - public Builder setUpgradeCmd( - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder builderForValue) { - if (upgradeCmdBuilder_ == null) { - upgradeCmd_ = builderForValue.build(); - onChanged(); - } else { - upgradeCmdBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000080; - return this; - } - public Builder mergeUpgradeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto value) { - if (upgradeCmdBuilder_ == null) { - if (((bitField0_ & 0x00000080) == 0x00000080) && - upgradeCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance()) { - upgradeCmd_ = - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.newBuilder(upgradeCmd_).mergeFrom(value).buildPartial(); - } else { - upgradeCmd_ = value; - } - onChanged(); - } else { - upgradeCmdBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000080; - return this; - } - public Builder clearUpgradeCmd() { - if (upgradeCmdBuilder_ == null) { - upgradeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance(); - onChanged(); - } else { - upgradeCmdBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000080); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder getUpgradeCmdBuilder() { - bitField0_ |= 0x00000080; - onChanged(); - return getUpgradeCmdFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getUpgradeCmdOrBuilder() { - if (upgradeCmdBuilder_ != null) { - return upgradeCmdBuilder_.getMessageOrBuilder(); - } else { - return upgradeCmd_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder> - getUpgradeCmdFieldBuilder() { - if (upgradeCmdBuilder_ == null) { - 
upgradeCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder>( - upgradeCmd_, - getParentForChildren(), - isClean()); - upgradeCmd_ = null; - } - return upgradeCmdBuilder_; - } - - // @@protoc_insertion_point(builder_scope:DatanodeCommandProto) - } - - static { - defaultInstance = new DatanodeCommandProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:DatanodeCommandProto) - } - - public interface BalancerBandwidthCommandProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required uint64 bandwidth = 1; - boolean hasBandwidth(); - long getBandwidth(); - } - public static final class BalancerBandwidthCommandProto extends - com.google.protobuf.GeneratedMessage - implements BalancerBandwidthCommandProtoOrBuilder { - // Use BalancerBandwidthCommandProto.newBuilder() to construct. - private BalancerBandwidthCommandProto(Builder builder) { - super(builder); - } - private BalancerBandwidthCommandProto(boolean noInit) {} - - private static final BalancerBandwidthCommandProto defaultInstance; - public static BalancerBandwidthCommandProto getDefaultInstance() { - return defaultInstance; - } - - public BalancerBandwidthCommandProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BalancerBandwidthCommandProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BalancerBandwidthCommandProto_fieldAccessorTable; - } - - private int bitField0_; - // required uint64 bandwidth = 1; - public static final int BANDWIDTH_FIELD_NUMBER = 1; - private long bandwidth_; - public boolean hasBandwidth() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getBandwidth() { - return bandwidth_; - } - - private void initFields() { - bandwidth_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBandwidth()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt64(1, bandwidth_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(1, bandwidth_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final 
java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto) obj; - - boolean result = true; - result = result && (hasBandwidth() == other.hasBandwidth()); - if (hasBandwidth()) { - result = result && (getBandwidth() - == other.getBandwidth()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBandwidth()) { - hash = (37 * hash) + BANDWIDTH_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getBandwidth()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BalancerBandwidthCommandProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BalancerBandwidthCommandProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - bandwidth_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.bandwidth_ = bandwidth_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance()) return this; - if (other.hasBandwidth()) { - setBandwidth(other.getBandwidth()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBandwidth()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - bandwidth_ = input.readUInt64(); - break; - } - } - } - } - - private int bitField0_; - - // required uint64 bandwidth = 1; - private long bandwidth_ ; - public boolean hasBandwidth() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getBandwidth() { - return bandwidth_; - } - public Builder setBandwidth(long value) { - bitField0_ |= 0x00000001; - bandwidth_ = value; - onChanged(); - return this; - } - public Builder clearBandwidth() { - bitField0_ = (bitField0_ & ~0x00000001); - bandwidth_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:BalancerBandwidthCommandProto) - } - - static { - defaultInstance = new BalancerBandwidthCommandProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:BalancerBandwidthCommandProto) - } - - public interface BlockCommandProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .BlockCommandProto.Action action = 1; - boolean hasAction(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action getAction(); - - // required string blockPoolId = 
2; - boolean hasBlockPoolId(); - String getBlockPoolId(); - - // repeated .BlockProto blocks = 3; - java.util.List - getBlocksList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index); - int getBlocksCount(); - java.util.List - getBlocksOrBuilderList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder( - int index); - - // repeated .DatanodeInfosProto targets = 4; - java.util.List - getTargetsList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getTargets(int index); - int getTargetsCount(); - java.util.List - getTargetsOrBuilderList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder getTargetsOrBuilder( - int index); - } - public static final class BlockCommandProto extends - com.google.protobuf.GeneratedMessage - implements BlockCommandProtoOrBuilder { - // Use BlockCommandProto.newBuilder() to construct. - private BlockCommandProto(Builder builder) { - super(builder); - } - private BlockCommandProto(boolean noInit) {} - - private static final BlockCommandProto defaultInstance; - public static BlockCommandProto getDefaultInstance() { - return defaultInstance; - } - - public BlockCommandProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockCommandProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockCommandProto_fieldAccessorTable; - } - - public enum Action - implements com.google.protobuf.ProtocolMessageEnum { - TRANSFER(0, 1), - INVALIDATE(1, 2), - ; - - public static final int TRANSFER_VALUE = 1; - public static final int INVALIDATE_VALUE = 2; - - - public final int getNumber() { return value; } - - public static Action valueOf(int value) { - switch (value) { - case 1: return TRANSFER; - case 2: return INVALIDATE; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public Action findValueByNumber(int number) { - return Action.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDescriptor().getEnumTypes().get(0); - } - - private static final Action[] VALUES = { - TRANSFER, INVALIDATE, - }; - - public static Action valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private Action(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:BlockCommandProto.Action) - 
} - - private int bitField0_; - // required .BlockCommandProto.Action action = 1; - public static final int ACTION_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action action_; - public boolean hasAction() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action getAction() { - return action_; - } - - // required string blockPoolId = 2; - public static final int BLOCKPOOLID_FIELD_NUMBER = 2; - private java.lang.Object blockPoolId_; - public boolean hasBlockPoolId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getBlockPoolId() { - java.lang.Object ref = blockPoolId_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - blockPoolId_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getBlockPoolIdBytes() { - java.lang.Object ref = blockPoolId_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - blockPoolId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // repeated .BlockProto blocks = 3; - public static final int BLOCKS_FIELD_NUMBER = 3; - private java.util.List blocks_; - public java.util.List getBlocksList() { - return blocks_; - } - public java.util.List - getBlocksOrBuilderList() { - return blocks_; - } - public int getBlocksCount() { - return blocks_.size(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index) { - return blocks_.get(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder( - int index) { - return blocks_.get(index); - } - - // repeated .DatanodeInfosProto targets = 4; - public static final int TARGETS_FIELD_NUMBER = 4; - private java.util.List targets_; - public java.util.List getTargetsList() { - return targets_; - } - public java.util.List - getTargetsOrBuilderList() { - return targets_; - } - public int getTargetsCount() { - return targets_.size(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getTargets(int index) { - return targets_.get(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder getTargetsOrBuilder( - int index) { - return targets_.get(index); - } - - private void initFields() { - action_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action.TRANSFER; - blockPoolId_ = ""; - blocks_ = java.util.Collections.emptyList(); - targets_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasAction()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasBlockPoolId()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getBlocksCount(); i++) { - if (!getBlocks(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - for (int i = 0; i < getTargetsCount(); i++) { - if (!getTargets(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void 
writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(1, action_.getNumber()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getBlockPoolIdBytes()); - } - for (int i = 0; i < blocks_.size(); i++) { - output.writeMessage(3, blocks_.get(i)); - } - for (int i = 0; i < targets_.size(); i++) { - output.writeMessage(4, targets_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, action_.getNumber()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getBlockPoolIdBytes()); - } - for (int i = 0; i < blocks_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, blocks_.get(i)); - } - for (int i = 0; i < targets_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, targets_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto) obj; - - boolean result = true; - result = result && (hasAction() == other.hasAction()); - if (hasAction()) { - result = result && - (getAction() == other.getAction()); - } - result = result && (hasBlockPoolId() == other.hasBlockPoolId()); - if (hasBlockPoolId()) { - result = result && getBlockPoolId() - .equals(other.getBlockPoolId()); - } - result = result && getBlocksList() - .equals(other.getBlocksList()); - result = result && getTargetsList() - .equals(other.getTargetsList()); - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasAction()) { - hash = (37 * hash) + ACTION_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getAction()); - } - if (hasBlockPoolId()) { - hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER; - hash = (53 * hash) + getBlockPoolId().hashCode(); - } - if (getBlocksCount() > 0) { - hash = (37 * hash) + BLOCKS_FIELD_NUMBER; - hash = (53 * hash) + getBlocksList().hashCode(); - } - if (getTargetsCount() > 0) { - hash = (37 * hash) + TARGETS_FIELD_NUMBER; - hash = (53 * hash) + getTargetsList().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - 
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - 
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockCommandProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockCommandProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBlocksFieldBuilder(); - getTargetsFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - action_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action.TRANSFER; - bitField0_ = (bitField0_ & ~0x00000001); - blockPoolId_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - if (blocksBuilder_ == null) { - blocks_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); - } else { - blocksBuilder_.clear(); - } - if (targetsBuilder_ == null) { - targets_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000008); - } else { - targetsBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.action_ = action_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.blockPoolId_ = blockPoolId_; - if (blocksBuilder_ == null) { - if (((bitField0_ & 0x00000004) == 0x00000004)) { - blocks_ = java.util.Collections.unmodifiableList(blocks_); - bitField0_ = (bitField0_ & ~0x00000004); - } - result.blocks_ = blocks_; - } else { - 
result.blocks_ = blocksBuilder_.build(); - } - if (targetsBuilder_ == null) { - if (((bitField0_ & 0x00000008) == 0x00000008)) { - targets_ = java.util.Collections.unmodifiableList(targets_); - bitField0_ = (bitField0_ & ~0x00000008); - } - result.targets_ = targets_; - } else { - result.targets_ = targetsBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance()) return this; - if (other.hasAction()) { - setAction(other.getAction()); - } - if (other.hasBlockPoolId()) { - setBlockPoolId(other.getBlockPoolId()); - } - if (blocksBuilder_ == null) { - if (!other.blocks_.isEmpty()) { - if (blocks_.isEmpty()) { - blocks_ = other.blocks_; - bitField0_ = (bitField0_ & ~0x00000004); - } else { - ensureBlocksIsMutable(); - blocks_.addAll(other.blocks_); - } - onChanged(); - } - } else { - if (!other.blocks_.isEmpty()) { - if (blocksBuilder_.isEmpty()) { - blocksBuilder_.dispose(); - blocksBuilder_ = null; - blocks_ = other.blocks_; - bitField0_ = (bitField0_ & ~0x00000004); - blocksBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getBlocksFieldBuilder() : null; - } else { - blocksBuilder_.addAllMessages(other.blocks_); - } - } - } - if (targetsBuilder_ == null) { - if (!other.targets_.isEmpty()) { - if (targets_.isEmpty()) { - targets_ = other.targets_; - bitField0_ = (bitField0_ & ~0x00000008); - } else { - ensureTargetsIsMutable(); - targets_.addAll(other.targets_); - } - onChanged(); - } - } else { - if (!other.targets_.isEmpty()) { - if (targetsBuilder_.isEmpty()) { - targetsBuilder_.dispose(); - targetsBuilder_ = null; - targets_ = other.targets_; - bitField0_ = (bitField0_ & ~0x00000008); - targetsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getTargetsFieldBuilder() : null; - } else { - targetsBuilder_.addAllMessages(other.targets_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasAction()) { - - return false; - } - if (!hasBlockPoolId()) { - - return false; - } - for (int i = 0; i < getBlocksCount(); i++) { - if (!getBlocks(i).isInitialized()) { - - return false; - } - } - for (int i = 0; i < getTargetsCount(); i++) { - if (!getTargets(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - int rawValue = input.readEnum(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action value = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - bitField0_ |= 0x00000001; - action_ = value; - } - break; - } - case 18: { - bitField0_ |= 0x00000002; - blockPoolId_ = input.readBytes(); - break; - } - case 26: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addBlocks(subBuilder.buildPartial()); - break; - } - case 34: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addTargets(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .BlockCommandProto.Action action = 1; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action action_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action.TRANSFER; - public boolean hasAction() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action getAction() { - return action_; - } - public Builder setAction(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - action_ = value; - onChanged(); - return this; - } - public Builder clearAction() { - bitField0_ = (bitField0_ & ~0x00000001); - action_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action.TRANSFER; - onChanged(); - return this; - } - - // required string blockPoolId = 2; - private java.lang.Object blockPoolId_ = ""; - public boolean hasBlockPoolId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getBlockPoolId() { - java.lang.Object ref = blockPoolId_; - if (!(ref instanceof String)) { - String s = 
((com.google.protobuf.ByteString) ref).toStringUtf8(); - blockPoolId_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setBlockPoolId(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - blockPoolId_ = value; - onChanged(); - return this; - } - public Builder clearBlockPoolId() { - bitField0_ = (bitField0_ & ~0x00000002); - blockPoolId_ = getDefaultInstance().getBlockPoolId(); - onChanged(); - return this; - } - void setBlockPoolId(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000002; - blockPoolId_ = value; - onChanged(); - } - - // repeated .BlockProto blocks = 3; - private java.util.List blocks_ = - java.util.Collections.emptyList(); - private void ensureBlocksIsMutable() { - if (!((bitField0_ & 0x00000004) == 0x00000004)) { - blocks_ = new java.util.ArrayList(blocks_); - bitField0_ |= 0x00000004; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> blocksBuilder_; - - public java.util.List getBlocksList() { - if (blocksBuilder_ == null) { - return java.util.Collections.unmodifiableList(blocks_); - } else { - return blocksBuilder_.getMessageList(); - } - } - public int getBlocksCount() { - if (blocksBuilder_ == null) { - return blocks_.size(); - } else { - return blocksBuilder_.getCount(); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index) { - if (blocksBuilder_ == null) { - return blocks_.get(index); - } else { - return blocksBuilder_.getMessage(index); - } - } - public Builder setBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) { - if (blocksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureBlocksIsMutable(); - blocks_.set(index, value); - onChanged(); - } else { - blocksBuilder_.setMessage(index, value); - } - return this; - } - public Builder setBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.set(index, builderForValue.build()); - onChanged(); - } else { - blocksBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) { - if (blocksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureBlocksIsMutable(); - blocks_.add(value); - onChanged(); - } else { - blocksBuilder_.addMessage(value); - } - return this; - } - public Builder addBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) { - if (blocksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureBlocksIsMutable(); - blocks_.add(index, value); - onChanged(); - } else { - blocksBuilder_.addMessage(index, value); - } - return this; - } - public Builder addBlocks( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.add(builderForValue.build()); - onChanged(); - } else { - blocksBuilder_.addMessage(builderForValue.build()); - } - return this; - } - public Builder addBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder 
builderForValue) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.add(index, builderForValue.build()); - onChanged(); - } else { - blocksBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - public Builder addAllBlocks( - java.lang.Iterable values) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - super.addAll(values, blocks_); - onChanged(); - } else { - blocksBuilder_.addAllMessages(values); - } - return this; - } - public Builder clearBlocks() { - if (blocksBuilder_ == null) { - blocks_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); - onChanged(); - } else { - blocksBuilder_.clear(); - } - return this; - } - public Builder removeBlocks(int index) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.remove(index); - onChanged(); - } else { - blocksBuilder_.remove(index); - } - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getBlocksBuilder( - int index) { - return getBlocksFieldBuilder().getBuilder(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder( - int index) { - if (blocksBuilder_ == null) { - return blocks_.get(index); } else { - return blocksBuilder_.getMessageOrBuilder(index); - } - } - public java.util.List - getBlocksOrBuilderList() { - if (blocksBuilder_ != null) { - return blocksBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(blocks_); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder addBlocksBuilder() { - return getBlocksFieldBuilder().addBuilder( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder addBlocksBuilder( - int index) { - return getBlocksFieldBuilder().addBuilder( - index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()); - } - public java.util.List - getBlocksBuilderList() { - return getBlocksFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> - getBlocksFieldBuilder() { - if (blocksBuilder_ == null) { - blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>( - blocks_, - ((bitField0_ & 0x00000004) == 0x00000004), - getParentForChildren(), - isClean()); - blocks_ = null; - } - return blocksBuilder_; - } - - // repeated .DatanodeInfosProto targets = 4; - private java.util.List targets_ = - java.util.Collections.emptyList(); - private void ensureTargetsIsMutable() { - if (!((bitField0_ & 0x00000008) == 0x00000008)) { - targets_ = new java.util.ArrayList(targets_); - bitField0_ |= 0x00000008; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder> targetsBuilder_; - - public java.util.List getTargetsList() { - if (targetsBuilder_ == null) { - return 
java.util.Collections.unmodifiableList(targets_); - } else { - return targetsBuilder_.getMessageList(); - } - } - public int getTargetsCount() { - if (targetsBuilder_ == null) { - return targets_.size(); - } else { - return targetsBuilder_.getCount(); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getTargets(int index) { - if (targetsBuilder_ == null) { - return targets_.get(index); - } else { - return targetsBuilder_.getMessage(index); - } - } - public Builder setTargets( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto value) { - if (targetsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTargetsIsMutable(); - targets_.set(index, value); - onChanged(); - } else { - targetsBuilder_.setMessage(index, value); - } - return this; - } - public Builder setTargets( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder builderForValue) { - if (targetsBuilder_ == null) { - ensureTargetsIsMutable(); - targets_.set(index, builderForValue.build()); - onChanged(); - } else { - targetsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - public Builder addTargets(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto value) { - if (targetsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTargetsIsMutable(); - targets_.add(value); - onChanged(); - } else { - targetsBuilder_.addMessage(value); - } - return this; - } - public Builder addTargets( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto value) { - if (targetsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTargetsIsMutable(); - targets_.add(index, value); - onChanged(); - } else { - targetsBuilder_.addMessage(index, value); - } - return this; - } - public Builder addTargets( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder builderForValue) { - if (targetsBuilder_ == null) { - ensureTargetsIsMutable(); - targets_.add(builderForValue.build()); - onChanged(); - } else { - targetsBuilder_.addMessage(builderForValue.build()); - } - return this; - } - public Builder addTargets( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder builderForValue) { - if (targetsBuilder_ == null) { - ensureTargetsIsMutable(); - targets_.add(index, builderForValue.build()); - onChanged(); - } else { - targetsBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - public Builder addAllTargets( - java.lang.Iterable values) { - if (targetsBuilder_ == null) { - ensureTargetsIsMutable(); - super.addAll(values, targets_); - onChanged(); - } else { - targetsBuilder_.addAllMessages(values); - } - return this; - } - public Builder clearTargets() { - if (targetsBuilder_ == null) { - targets_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000008); - onChanged(); - } else { - targetsBuilder_.clear(); - } - return this; - } - public Builder removeTargets(int index) { - if (targetsBuilder_ == null) { - ensureTargetsIsMutable(); - targets_.remove(index); - onChanged(); - } else { - targetsBuilder_.remove(index); - } - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder getTargetsBuilder( - int index) { - return getTargetsFieldBuilder().getBuilder(index); - } - public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder getTargetsOrBuilder( - int index) { - if (targetsBuilder_ == null) { - return targets_.get(index); } else { - return targetsBuilder_.getMessageOrBuilder(index); - } - } - public java.util.List - getTargetsOrBuilderList() { - if (targetsBuilder_ != null) { - return targetsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(targets_); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder addTargetsBuilder() { - return getTargetsFieldBuilder().addBuilder( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance()); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder addTargetsBuilder( - int index) { - return getTargetsFieldBuilder().addBuilder( - index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance()); - } - public java.util.List - getTargetsBuilderList() { - return getTargetsFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder> - getTargetsFieldBuilder() { - if (targetsBuilder_ == null) { - targetsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder>( - targets_, - ((bitField0_ & 0x00000008) == 0x00000008), - getParentForChildren(), - isClean()); - targets_ = null; - } - return targetsBuilder_; - } - - // @@protoc_insertion_point(builder_scope:BlockCommandProto) - } - - static { - defaultInstance = new BlockCommandProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:BlockCommandProto) - } - - public interface BlockRecoveryCommandProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // repeated .RecoveringBlockProto blocks = 1; - java.util.List - getBlocksList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto getBlocks(int index); - int getBlocksCount(); - java.util.List - getBlocksOrBuilderList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder getBlocksOrBuilder( - int index); - } - public static final class BlockRecoveryCommandProto extends - com.google.protobuf.GeneratedMessage - implements BlockRecoveryCommandProtoOrBuilder { - // Use BlockRecoveryCommandProto.newBuilder() to construct. 
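[Editorial sketch, not part of the deleted source: the repeated-field accessors removed above for BlockCommandProto (addBlocks/addTargets and friends, backed by a lazily created RepeatedFieldBuilder) are driven roughly like this. Values are hypothetical defaults, and buildPartial() is used because the sketch leaves BlockCommandProto's required fields unset.]

    import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;

    public class BlockCommandSketch {
      public static void main(String[] args) {
        // buildPartial() skips the required-field check; only the repeated
        // blocks/targets accessors shown above are exercised here.
        BlockCommandProto cmd = BlockCommandProto.newBuilder()
            .addBlocks(HdfsProtos.BlockProto.getDefaultInstance())
            .addTargets(HdfsProtos.DatanodeInfosProto.getDefaultInstance())
            .buildPartial();
        System.out.println(cmd.getBlocksCount());   // 1
        System.out.println(cmd.getTargetsCount());  // 1
      }
    }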
- private BlockRecoveryCommandProto(Builder builder) { - super(builder); - } - private BlockRecoveryCommandProto(boolean noInit) {} - - private static final BlockRecoveryCommandProto defaultInstance; - public static BlockRecoveryCommandProto getDefaultInstance() { - return defaultInstance; - } - - public BlockRecoveryCommandProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockRecoveryCommandProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockRecoveryCommandProto_fieldAccessorTable; - } - - // repeated .RecoveringBlockProto blocks = 1; - public static final int BLOCKS_FIELD_NUMBER = 1; - private java.util.List blocks_; - public java.util.List getBlocksList() { - return blocks_; - } - public java.util.List - getBlocksOrBuilderList() { - return blocks_; - } - public int getBlocksCount() { - return blocks_.size(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto getBlocks(int index) { - return blocks_.get(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder getBlocksOrBuilder( - int index) { - return blocks_.get(index); - } - - private void initFields() { - blocks_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - for (int i = 0; i < getBlocksCount(); i++) { - if (!getBlocks(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < blocks_.size(); i++) { - output.writeMessage(1, blocks_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - for (int i = 0; i < blocks_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, blocks_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto) obj; - - boolean result = true; - result = result && getBlocksList() - .equals(other.getBlocksList()); - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + 
getDescriptorForType().hashCode(); - if (getBlocksCount() > 0) { - hash = (37 * hash) + BLOCKS_FIELD_NUMBER; - hash = (53 * hash) + getBlocksList().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder 
newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockRecoveryCommandProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockRecoveryCommandProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBlocksFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (blocksBuilder_ == null) { - blocks_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - } else { - blocksBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto(this); - int from_bitField0_ = bitField0_; - if (blocksBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - blocks_ = 
java.util.Collections.unmodifiableList(blocks_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.blocks_ = blocks_; - } else { - result.blocks_ = blocksBuilder_.build(); - } - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance()) return this; - if (blocksBuilder_ == null) { - if (!other.blocks_.isEmpty()) { - if (blocks_.isEmpty()) { - blocks_ = other.blocks_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureBlocksIsMutable(); - blocks_.addAll(other.blocks_); - } - onChanged(); - } - } else { - if (!other.blocks_.isEmpty()) { - if (blocksBuilder_.isEmpty()) { - blocksBuilder_.dispose(); - blocksBuilder_ = null; - blocks_ = other.blocks_; - bitField0_ = (bitField0_ & ~0x00000001); - blocksBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getBlocksFieldBuilder() : null; - } else { - blocksBuilder_.addAllMessages(other.blocks_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - for (int i = 0; i < getBlocksCount(); i++) { - if (!getBlocks(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addBlocks(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // repeated .RecoveringBlockProto blocks = 1; - private java.util.List blocks_ = - java.util.Collections.emptyList(); - private void ensureBlocksIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - blocks_ = new java.util.ArrayList(blocks_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder> blocksBuilder_; - - public java.util.List getBlocksList() { - if (blocksBuilder_ == null) { - return java.util.Collections.unmodifiableList(blocks_); - } else { - return blocksBuilder_.getMessageList(); - } - } - public int getBlocksCount() { - if (blocksBuilder_ == 
null) { - return blocks_.size(); - } else { - return blocksBuilder_.getCount(); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto getBlocks(int index) { - if (blocksBuilder_ == null) { - return blocks_.get(index); - } else { - return blocksBuilder_.getMessage(index); - } - } - public Builder setBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto value) { - if (blocksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureBlocksIsMutable(); - blocks_.set(index, value); - onChanged(); - } else { - blocksBuilder_.setMessage(index, value); - } - return this; - } - public Builder setBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder builderForValue) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.set(index, builderForValue.build()); - onChanged(); - } else { - blocksBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto value) { - if (blocksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureBlocksIsMutable(); - blocks_.add(value); - onChanged(); - } else { - blocksBuilder_.addMessage(value); - } - return this; - } - public Builder addBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto value) { - if (blocksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureBlocksIsMutable(); - blocks_.add(index, value); - onChanged(); - } else { - blocksBuilder_.addMessage(index, value); - } - return this; - } - public Builder addBlocks( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder builderForValue) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.add(builderForValue.build()); - onChanged(); - } else { - blocksBuilder_.addMessage(builderForValue.build()); - } - return this; - } - public Builder addBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder builderForValue) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.add(index, builderForValue.build()); - onChanged(); - } else { - blocksBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - public Builder addAllBlocks( - java.lang.Iterable values) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - super.addAll(values, blocks_); - onChanged(); - } else { - blocksBuilder_.addAllMessages(values); - } - return this; - } - public Builder clearBlocks() { - if (blocksBuilder_ == null) { - blocks_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - blocksBuilder_.clear(); - } - return this; - } - public Builder removeBlocks(int index) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.remove(index); - onChanged(); - } else { - blocksBuilder_.remove(index); - } - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder getBlocksBuilder( - int index) { - return getBlocksFieldBuilder().getBuilder(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder getBlocksOrBuilder( - int index) { - if (blocksBuilder_ == null) { - return blocks_.get(index); } else { - return blocksBuilder_.getMessageOrBuilder(index); - } - } - public java.util.List - 
getBlocksOrBuilderList() { - if (blocksBuilder_ != null) { - return blocksBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(blocks_); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder addBlocksBuilder() { - return getBlocksFieldBuilder().addBuilder( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance()); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder addBlocksBuilder( - int index) { - return getBlocksFieldBuilder().addBuilder( - index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance()); - } - public java.util.List - getBlocksBuilderList() { - return getBlocksFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder> - getBlocksFieldBuilder() { - if (blocksBuilder_ == null) { - blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder>( - blocks_, - ((bitField0_ & 0x00000001) == 0x00000001), - getParentForChildren(), - isClean()); - blocks_ = null; - } - return blocksBuilder_; - } - - // @@protoc_insertion_point(builder_scope:BlockRecoveryCommandProto) - } - - static { - defaultInstance = new BlockRecoveryCommandProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:BlockRecoveryCommandProto) - } - - public interface FinalizeCommandProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string blockPoolId = 1; - boolean hasBlockPoolId(); - String getBlockPoolId(); - } - public static final class FinalizeCommandProto extends - com.google.protobuf.GeneratedMessage - implements FinalizeCommandProtoOrBuilder { - // Use FinalizeCommandProto.newBuilder() to construct. 
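[Editorial sketch, not part of the deleted source: every message removed here carries the same battery of parseFrom/parseDelimitedFrom overloads built on newBuilder().mergeFrom(...).buildParsed(). A minimal wire round trip for BlockRecoveryCommandProto, whose only field is the repeated blocks list:]

    import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;

    public class RecoveryRoundTrip {
      public static void main(String[] args) throws Exception {
        // An empty message passes isInitialized(): repeated fields are
        // never required, so build() succeeds with zero blocks.
        BlockRecoveryCommandProto cmd =
            BlockRecoveryCommandProto.newBuilder().build();
        byte[] wire = cmd.toByteArray();  // inherited from AbstractMessageLite
        BlockRecoveryCommandProto parsed =
            BlockRecoveryCommandProto.parseFrom(wire);
        System.out.println(parsed.getBlocksCount());  // 0
      }
    }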
- private FinalizeCommandProto(Builder builder) { - super(builder); - } - private FinalizeCommandProto(boolean noInit) {} - - private static final FinalizeCommandProto defaultInstance; - public static FinalizeCommandProto getDefaultInstance() { - return defaultInstance; - } - - public FinalizeCommandProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_FinalizeCommandProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_FinalizeCommandProto_fieldAccessorTable; - } - - private int bitField0_; - // required string blockPoolId = 1; - public static final int BLOCKPOOLID_FIELD_NUMBER = 1; - private java.lang.Object blockPoolId_; - public boolean hasBlockPoolId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getBlockPoolId() { - java.lang.Object ref = blockPoolId_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - blockPoolId_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getBlockPoolIdBytes() { - java.lang.Object ref = blockPoolId_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - blockPoolId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - blockPoolId_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBlockPoolId()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getBlockPoolIdBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getBlockPoolIdBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto) obj; - - boolean result = true; - result = result && (hasBlockPoolId() == other.hasBlockPoolId()); - if (hasBlockPoolId()) { - 
result = result && getBlockPoolId() - .equals(other.getBlockPoolId()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBlockPoolId()) { - hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER; - hash = (53 * hash) + getBlockPoolId().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder 
newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_FinalizeCommandProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_FinalizeCommandProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - blockPoolId_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.blockPoolId_ = blockPoolId_; - result.bitField0_ = to_bitField0_; 
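        // Editorial note on the two-step bit copy above: bit 0x00000001 of
        // from_bitField0_ records that setBlockPoolId() was called, and it
        // is copied into the message's own bitField0_ so that
        // hasBlockPoolId() on the built FinalizeCommandProto mirrors the
        // builder state.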
- onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance()) return this; - if (other.hasBlockPoolId()) { - setBlockPoolId(other.getBlockPoolId()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBlockPoolId()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - blockPoolId_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required string blockPoolId = 1; - private java.lang.Object blockPoolId_ = ""; - public boolean hasBlockPoolId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getBlockPoolId() { - java.lang.Object ref = blockPoolId_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - blockPoolId_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setBlockPoolId(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - blockPoolId_ = value; - onChanged(); - return this; - } - public Builder clearBlockPoolId() { - bitField0_ = (bitField0_ & ~0x00000001); - blockPoolId_ = getDefaultInstance().getBlockPoolId(); - onChanged(); - return this; - } - void setBlockPoolId(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - blockPoolId_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:FinalizeCommandProto) - } - - static { - defaultInstance = new FinalizeCommandProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:FinalizeCommandProto) - } - - public interface KeyUpdateCommandProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .ExportedBlockKeysProto keys = 1; - boolean hasKeys(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getKeys(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder(); - } - public static final class KeyUpdateCommandProto extends - com.google.protobuf.GeneratedMessage - implements KeyUpdateCommandProtoOrBuilder { - // Use KeyUpdateCommandProto.newBuilder() to construct. 
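[Editorial sketch, not part of the deleted source: taken together, the FinalizeCommandProto pieces removed above compose as follows; the block pool id is made up.]

    import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto;

    public class FinalizeSketch {
      public static void main(String[] args) {
        // build() would throw UninitializedMessageException if the
        // required blockPoolId were left unset.
        FinalizeCommandProto cmd = FinalizeCommandProto.newBuilder()
            .setBlockPoolId("BP-0000-example")  // hypothetical pool id
            .build();
        System.out.println(cmd.getBlockPoolId());
      }
    }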
- private KeyUpdateCommandProto(Builder builder) { - super(builder); - } - private KeyUpdateCommandProto(boolean noInit) {} - - private static final KeyUpdateCommandProto defaultInstance; - public static KeyUpdateCommandProto getDefaultInstance() { - return defaultInstance; - } - - public KeyUpdateCommandProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_KeyUpdateCommandProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_KeyUpdateCommandProto_fieldAccessorTable; - } - - private int bitField0_; - // required .ExportedBlockKeysProto keys = 1; - public static final int KEYS_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto keys_; - public boolean hasKeys() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getKeys() { - return keys_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() { - return keys_; - } - - private void initFields() { - keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasKeys()) { - memoizedIsInitialized = 0; - return false; - } - if (!getKeys().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, keys_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, keys_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto) obj; - - boolean result = true; - result = result && (hasKeys() == other.hasKeys()); - if (hasKeys()) { - result = result && getKeys() - .equals(other.getKeys()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + 
getDescriptorForType().hashCode(); - if (hasKeys()) { - hash = (37 * hash) + KEYS_FIELD_NUMBER; - hash = (53 * hash) + getKeys().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto prototype) { - return newBuilder().mergeFrom(prototype); - 
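        // Editorial note: newBuilder(prototype) is sugar for
        // newBuilder().mergeFrom(prototype), so toBuilder() below hands
        // back an independent, editable copy of this message.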
} - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_KeyUpdateCommandProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_KeyUpdateCommandProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getKeysFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (keysBuilder_ == null) { - keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance(); - } else { - keysBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (keysBuilder_ == null) { - result.keys_ = keys_; - } else { - result.keys_ = keysBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - 
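        // Editorial note: keys_ holds the message directly until a
        // SingleFieldBuilder is forced via getKeysFieldBuilder(); the
        // branch above therefore reads whichever representation is
        // currently live.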
return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance()) return this; - if (other.hasKeys()) { - mergeKeys(other.getKeys()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasKeys()) { - - return false; - } - if (!getKeys().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.newBuilder(); - if (hasKeys()) { - subBuilder.mergeFrom(getKeys()); - } - input.readMessage(subBuilder, extensionRegistry); - setKeys(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .ExportedBlockKeysProto keys = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder> keysBuilder_; - public boolean hasKeys() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getKeys() { - if (keysBuilder_ == null) { - return keys_; - } else { - return keysBuilder_.getMessage(); - } - } - public Builder setKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto value) { - if (keysBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - keys_ = value; - onChanged(); - } else { - keysBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setKeys( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder builderForValue) { - if (keysBuilder_ == null) { - keys_ = builderForValue.build(); - onChanged(); - } else { - keysBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto value) { - if (keysBuilder_ == null) { - 
if (((bitField0_ & 0x00000001) == 0x00000001) && - keys_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance()) { - keys_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.newBuilder(keys_).mergeFrom(value).buildPartial(); - } else { - keys_ = value; - } - onChanged(); - } else { - keysBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearKeys() { - if (keysBuilder_ == null) { - keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance(); - onChanged(); - } else { - keysBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder getKeysBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getKeysFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() { - if (keysBuilder_ != null) { - return keysBuilder_.getMessageOrBuilder(); - } else { - return keys_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder> - getKeysFieldBuilder() { - if (keysBuilder_ == null) { - keysBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder>( - keys_, - getParentForChildren(), - isClean()); - keys_ = null; - } - return keysBuilder_; - } - - // @@protoc_insertion_point(builder_scope:KeyUpdateCommandProto) - } - - static { - defaultInstance = new KeyUpdateCommandProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:KeyUpdateCommandProto) - } - - public interface RegisterCommandProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class RegisterCommandProto extends - com.google.protobuf.GeneratedMessage - implements RegisterCommandProtoOrBuilder { - // Use RegisterCommandProto.newBuilder() to construct. 
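[Editorial sketch, not part of the deleted source: the singular-message machinery removed above (setKeys/mergeKeys/clearKeys plus the SingleFieldBuilder) is used along these lines. Default instances stand in for real values, and buildPartial() is used because any required sub-fields of ExportedBlockKeysProto are left unset.]

    import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;

    public class KeyUpdateSketch {
      public static void main(String[] args) {
        KeyUpdateCommandProto.Builder b = KeyUpdateCommandProto.newBuilder()
            .setKeys(HdfsProtos.ExportedBlockKeysProto.getDefaultInstance());
        // mergeKeys() merges field-by-field only when a non-default keys
        // message is already set; here only the default instance was set,
        // so the incoming value is simply adopted.
        b.mergeKeys(HdfsProtos.ExportedBlockKeysProto.getDefaultInstance());
        KeyUpdateCommandProto cmd = b.buildPartial();
        System.out.println(cmd.hasKeys());  // true
      }
    }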
- private RegisterCommandProto(Builder builder) { - super(builder); - } - private RegisterCommandProto(boolean noInit) {} - - private static final RegisterCommandProto defaultInstance; - public static RegisterCommandProto getDefaultInstance() { - return defaultInstance; - } - - public RegisterCommandProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterCommandProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterCommandProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterCommandProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterCommandProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if 
(com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:RegisterCommandProto) - } - - static { - defaultInstance = new RegisterCommandProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:RegisterCommandProto) - } - - public interface UpgradeCommandProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required 
.UpgradeCommandProto.Action action = 1; - boolean hasAction(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Action getAction(); - - // required uint32 version = 2; - boolean hasVersion(); - int getVersion(); - - // required uint32 upgradeStatus = 3; - boolean hasUpgradeStatus(); - int getUpgradeStatus(); - } - public static final class UpgradeCommandProto extends - com.google.protobuf.GeneratedMessage - implements UpgradeCommandProtoOrBuilder { - // Use UpgradeCommandProto.newBuilder() to construct. - private UpgradeCommandProto(Builder builder) { - super(builder); - } - private UpgradeCommandProto(boolean noInit) {} - - private static final UpgradeCommandProto defaultInstance; - public static UpgradeCommandProto getDefaultInstance() { - return defaultInstance; - } - - public UpgradeCommandProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_UpgradeCommandProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_UpgradeCommandProto_fieldAccessorTable; - } - - public enum Action - implements com.google.protobuf.ProtocolMessageEnum { - UNKNOWN(0, 0), - REPORT_STATUS(1, 100), - START_UPGRADE(2, 101), - ; - - public static final int UNKNOWN_VALUE = 0; - public static final int REPORT_STATUS_VALUE = 100; - public static final int START_UPGRADE_VALUE = 101; - - - public final int getNumber() { return value; } - - public static Action valueOf(int value) { - switch (value) { - case 0: return UNKNOWN; - case 100: return REPORT_STATUS; - case 101: return START_UPGRADE; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public Action findValueByNumber(int number) { - return Action.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDescriptor().getEnumTypes().get(0); - } - - private static final Action[] VALUES = { - UNKNOWN, REPORT_STATUS, START_UPGRADE, - }; - - public static Action valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private Action(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:UpgradeCommandProto.Action) - } - - private int bitField0_; - // required .UpgradeCommandProto.Action action = 1; - public static final int ACTION_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Action action_; - 
public boolean hasAction() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Action getAction() { - return action_; - } - - // required uint32 version = 2; - public static final int VERSION_FIELD_NUMBER = 2; - private int version_; - public boolean hasVersion() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public int getVersion() { - return version_; - } - - // required uint32 upgradeStatus = 3; - public static final int UPGRADESTATUS_FIELD_NUMBER = 3; - private int upgradeStatus_; - public boolean hasUpgradeStatus() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public int getUpgradeStatus() { - return upgradeStatus_; - } - - private void initFields() { - action_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Action.UNKNOWN; - version_ = 0; - upgradeStatus_ = 0; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasAction()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasVersion()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasUpgradeStatus()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(1, action_.getNumber()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt32(2, version_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt32(3, upgradeStatus_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, action_.getNumber()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(2, version_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(3, upgradeStatus_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto) obj; - - boolean result = true; - result = result && (hasAction() == other.hasAction()); - if (hasAction()) { - result = result && - (getAction() == other.getAction()); - } - result = result && (hasVersion() == other.hasVersion()); - if (hasVersion()) { - result = result && (getVersion() - == other.getVersion()); - } - result = result && (hasUpgradeStatus() == other.hasUpgradeStatus()); 
- if (hasUpgradeStatus()) { - result = result && (getUpgradeStatus() - == other.getUpgradeStatus()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasAction()) { - hash = (37 * hash) + ACTION_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getAction()); - } - if (hasVersion()) { - hash = (37 * hash) + VERSION_FIELD_NUMBER; - hash = (53 * hash) + getVersion(); - } - if (hasUpgradeStatus()) { - hash = (37 * hash) + UPGRADESTATUS_FIELD_NUMBER; - hash = (53 * hash) + getUpgradeStatus(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseFrom( - 
com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_UpgradeCommandProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_UpgradeCommandProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - action_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Action.UNKNOWN; - bitField0_ = (bitField0_ & ~0x00000001); - version_ = 0; - bitField0_ = (bitField0_ & ~0x00000002); - upgradeStatus_ = 0; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.action_ = action_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.version_ = version_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.upgradeStatus_ = upgradeStatus_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance()) return this; - if (other.hasAction()) { - setAction(other.getAction()); - } - if (other.hasVersion()) { - setVersion(other.getVersion()); - } - if (other.hasUpgradeStatus()) { - setUpgradeStatus(other.getUpgradeStatus()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasAction()) { - - return false; - } - if (!hasVersion()) { - - return false; - } - if (!hasUpgradeStatus()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - int rawValue = input.readEnum(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Action value = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Action.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - bitField0_ |= 0x00000001; - action_ = value; - } - break; - } - case 16: { - bitField0_ |= 0x00000002; - version_ = input.readUInt32(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - upgradeStatus_ = input.readUInt32(); - break; - } - } - } - } - - private int bitField0_; - - // required .UpgradeCommandProto.Action action = 1; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Action action_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Action.UNKNOWN; - public boolean hasAction() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Action getAction() { - return action_; - } - public Builder setAction(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Action value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - action_ = value; - onChanged(); - return this; - } - public Builder clearAction() { - bitField0_ = (bitField0_ & ~0x00000001); - action_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Action.UNKNOWN; - onChanged(); - return this; - } - - // required uint32 version = 2; - private int version_ ; - public boolean hasVersion() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public int getVersion() { - return version_; - } - public Builder setVersion(int value) { - bitField0_ |= 0x00000002; - version_ = value; - onChanged(); - return this; - } - public Builder clearVersion() { - bitField0_ = (bitField0_ & ~0x00000002); - version_ = 0; - onChanged(); - return this; - } - - // required uint32 upgradeStatus = 3; - private int upgradeStatus_ ; - public boolean hasUpgradeStatus() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public int getUpgradeStatus() { - return upgradeStatus_; - } - public Builder setUpgradeStatus(int value) { - bitField0_ |= 0x00000004; - upgradeStatus_ = value; - onChanged(); - return this; - } - public Builder clearUpgradeStatus() { - bitField0_ = (bitField0_ & ~0x00000004); - upgradeStatus_ = 0; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:UpgradeCommandProto) - } - - static { - defaultInstance = new UpgradeCommandProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:UpgradeCommandProto) - } - - public interface RegisterDatanodeRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .DatanodeRegistrationProto registration = 1; - boolean hasRegistration(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder(); - } - public static final class RegisterDatanodeRequestProto extends - com.google.protobuf.GeneratedMessage - implements RegisterDatanodeRequestProtoOrBuilder { - // Use RegisterDatanodeRequestProto.newBuilder() to construct. 
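// Illustrative sketch (not part of the compiler's output; assumes a protobuf
// 2.4-era runtime; `registration` is a hypothetical, fully-populated
// DatanodeRegistrationProto supplied by the caller): the required field must
// be set before build(), which otherwise throws via
// newUninitializedMessageException(result).
//
//   org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto req =
//       org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.newBuilder()
//           .setRegistration(registration)  // required .DatanodeRegistrationProto registration = 1
//           .build();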
- private RegisterDatanodeRequestProto(Builder builder) { - super(builder); - } - private RegisterDatanodeRequestProto(boolean noInit) {} - - private static final RegisterDatanodeRequestProto defaultInstance; - public static RegisterDatanodeRequestProto getDefaultInstance() { - return defaultInstance; - } - - public RegisterDatanodeRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterDatanodeRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterDatanodeRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required .DatanodeRegistrationProto registration = 1; - public static final int REGISTRATION_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_; - public boolean hasRegistration() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() { - return registration_; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { - return registration_; - } - - private void initFields() { - registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasRegistration()) { - memoizedIsInitialized = 0; - return false; - } - if (!getRegistration().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, registration_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, registration_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto) obj; - - boolean result = true; - result = result && (hasRegistration() == other.hasRegistration()); - if (hasRegistration()) { - result = 
result && getRegistration() - .equals(other.getRegistration()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasRegistration()) { - hash = (37 * hash) + REGISTRATION_FIELD_NUMBER; - hash = (53 * hash) + getRegistration().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, 
extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterDatanodeRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterDatanodeRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getRegistrationFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (registrationBuilder_ == null) { - registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - } else { - registrationBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto buildPartial() { - 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (registrationBuilder_ == null) { - result.registration_ = registration_; - } else { - result.registration_ = registrationBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.getDefaultInstance()) return this; - if (other.hasRegistration()) { - mergeRegistration(other.getRegistration()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasRegistration()) { - - return false; - } - if (!getRegistration().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(); - if (hasRegistration()) { - subBuilder.mergeFrom(getRegistration()); - } - input.readMessage(subBuilder, extensionRegistry); - setRegistration(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .DatanodeRegistrationProto registration = 1; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_; - public boolean hasRegistration() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() { - if (registrationBuilder_ == null) { - return registration_; - } else { - return 
registrationBuilder_.getMessage(); - } - } - public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) { - if (registrationBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - registration_ = value; - onChanged(); - } else { - registrationBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setRegistration( - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) { - if (registrationBuilder_ == null) { - registration_ = builderForValue.build(); - onChanged(); - } else { - registrationBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) { - if (registrationBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) { - registration_ = - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial(); - } else { - registration_ = value; - } - onChanged(); - } else { - registrationBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearRegistration() { - if (registrationBuilder_ == null) { - registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - onChanged(); - } else { - registrationBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getRegistrationFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { - if (registrationBuilder_ != null) { - return registrationBuilder_.getMessageOrBuilder(); - } else { - return registration_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> - getRegistrationFieldBuilder() { - if (registrationBuilder_ == null) { - registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>( - registration_, - getParentForChildren(), - isClean()); - registration_ = null; - } - return registrationBuilder_; - } - - // @@protoc_insertion_point(builder_scope:RegisterDatanodeRequestProto) - } - - static { - defaultInstance = new RegisterDatanodeRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:RegisterDatanodeRequestProto) - } - - public interface RegisterDatanodeResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // 
required .DatanodeRegistrationProto registration = 1; - boolean hasRegistration(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder(); - } - public static final class RegisterDatanodeResponseProto extends - com.google.protobuf.GeneratedMessage - implements RegisterDatanodeResponseProtoOrBuilder { - // Use RegisterDatanodeResponseProto.newBuilder() to construct. - private RegisterDatanodeResponseProto(Builder builder) { - super(builder); - } - private RegisterDatanodeResponseProto(boolean noInit) {} - - private static final RegisterDatanodeResponseProto defaultInstance; - public static RegisterDatanodeResponseProto getDefaultInstance() { - return defaultInstance; - } - - public RegisterDatanodeResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterDatanodeResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterDatanodeResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required .DatanodeRegistrationProto registration = 1; - public static final int REGISTRATION_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_; - public boolean hasRegistration() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() { - return registration_; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { - return registration_; - } - - private void initFields() { - registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasRegistration()) { - memoizedIsInitialized = 0; - return false; - } - if (!getRegistration().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, registration_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, registration_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean 
equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto) obj; - - boolean result = true; - result = result && (hasRegistration() == other.hasRegistration()); - if (hasRegistration()) { - result = result && getRegistration() - .equals(other.getRegistration()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasRegistration()) { - hash = (37 * hash) + REGISTRATION_FIELD_NUMBER; - hash = (53 * hash) + getRegistration().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - 
return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterDatanodeResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterDatanodeResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getRegistrationFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (registrationBuilder_ == null) { - registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - } else { - registrationBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; 
- } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (registrationBuilder_ == null) { - result.registration_ = registration_; - } else { - result.registration_ = registrationBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance()) return this; - if (other.hasRegistration()) { - mergeRegistration(other.getRegistration()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasRegistration()) { - - return false; - } - if (!getRegistration().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(); - if (hasRegistration()) { - subBuilder.mergeFrom(getRegistration()); - } - input.readMessage(subBuilder, extensionRegistry); - setRegistration(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .DatanodeRegistrationProto registration = 1; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_; - public boolean hasRegistration() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() { - if (registrationBuilder_ == null) { - return registration_; - } else { - return registrationBuilder_.getMessage(); - } - } - public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) { - if (registrationBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - registration_ = value; - onChanged(); - } else { - registrationBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setRegistration( - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) { - if (registrationBuilder_ == null) { - registration_ = builderForValue.build(); - onChanged(); - } else { - registrationBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) { - if (registrationBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) { - registration_ = - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial(); - } else { - registration_ = value; - } - onChanged(); - } else { - registrationBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearRegistration() { - if (registrationBuilder_ == null) { - registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - onChanged(); - } else { - registrationBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getRegistrationFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { - if (registrationBuilder_ != null) { - return registrationBuilder_.getMessageOrBuilder(); - } else { - return registration_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> - getRegistrationFieldBuilder() { - if (registrationBuilder_ == null) { - registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>( - registration_, - getParentForChildren(), - isClean()); - registration_ = null; - } - return registrationBuilder_; - } - - // @@protoc_insertion_point(builder_scope:RegisterDatanodeResponseProto) - } - - static { - defaultInstance = new RegisterDatanodeResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:RegisterDatanodeResponseProto) - } - - public interface HeartbeatRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .DatanodeRegistrationProto registration = 1; - boolean hasRegistration(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder(); - - // required uint64 capacity = 2; - boolean hasCapacity(); - long getCapacity(); - - // required uint64 dfsUsed = 3; - boolean hasDfsUsed(); - long getDfsUsed(); - - // required uint64 remaining = 4; - boolean hasRemaining(); - long getRemaining(); - - // required uint64 blockPoolUsed = 5; - boolean hasBlockPoolUsed(); - long getBlockPoolUsed(); - - // required uint32 xmitsInProgress = 6; - boolean hasXmitsInProgress(); - int getXmitsInProgress(); - - // required uint32 xceiverCount = 7; - boolean hasXceiverCount(); - int getXceiverCount(); - - // required uint32 failedVolumes = 8; - boolean hasFailedVolumes(); - int getFailedVolumes(); - } - public static final class HeartbeatRequestProto extends - com.google.protobuf.GeneratedMessage - implements HeartbeatRequestProtoOrBuilder { - // Use HeartbeatRequestProto.newBuilder() to construct. - private HeartbeatRequestProto(Builder builder) { - super(builder); - } - private HeartbeatRequestProto(boolean noInit) {} - - private static final HeartbeatRequestProto defaultInstance; - public static HeartbeatRequestProto getDefaultInstance() { - return defaultInstance; - } - - public HeartbeatRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_HeartbeatRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_HeartbeatRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required .DatanodeRegistrationProto registration = 1; - public static final int REGISTRATION_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_; - public boolean hasRegistration() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() { - return registration_; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { - return registration_; - } - - // required uint64 capacity = 2; - public static final int CAPACITY_FIELD_NUMBER = 2; - private long capacity_; - public boolean hasCapacity() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getCapacity() { - return capacity_; - } - - // required uint64 dfsUsed = 3; - public static 
final int DFSUSED_FIELD_NUMBER = 3; - private long dfsUsed_; - public boolean hasDfsUsed() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getDfsUsed() { - return dfsUsed_; - } - - // required uint64 remaining = 4; - public static final int REMAINING_FIELD_NUMBER = 4; - private long remaining_; - public boolean hasRemaining() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public long getRemaining() { - return remaining_; - } - - // required uint64 blockPoolUsed = 5; - public static final int BLOCKPOOLUSED_FIELD_NUMBER = 5; - private long blockPoolUsed_; - public boolean hasBlockPoolUsed() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - public long getBlockPoolUsed() { - return blockPoolUsed_; - } - - // required uint32 xmitsInProgress = 6; - public static final int XMITSINPROGRESS_FIELD_NUMBER = 6; - private int xmitsInProgress_; - public boolean hasXmitsInProgress() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - public int getXmitsInProgress() { - return xmitsInProgress_; - } - - // required uint32 xceiverCount = 7; - public static final int XCEIVERCOUNT_FIELD_NUMBER = 7; - private int xceiverCount_; - public boolean hasXceiverCount() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - public int getXceiverCount() { - return xceiverCount_; - } - - // required uint32 failedVolumes = 8; - public static final int FAILEDVOLUMES_FIELD_NUMBER = 8; - private int failedVolumes_; - public boolean hasFailedVolumes() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - public int getFailedVolumes() { - return failedVolumes_; - } - - private void initFields() { - registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - capacity_ = 0L; - dfsUsed_ = 0L; - remaining_ = 0L; - blockPoolUsed_ = 0L; - xmitsInProgress_ = 0; - xceiverCount_ = 0; - failedVolumes_ = 0; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasRegistration()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasCapacity()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasDfsUsed()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasRemaining()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasBlockPoolUsed()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasXmitsInProgress()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasXceiverCount()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasFailedVolumes()) { - memoizedIsInitialized = 0; - return false; - } - if (!getRegistration().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, registration_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, capacity_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt64(3, dfsUsed_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeUInt64(4, remaining_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeUInt64(5, blockPoolUsed_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeUInt32(6, xmitsInProgress_); - } - 
if (((bitField0_ & 0x00000040) == 0x00000040)) { - output.writeUInt32(7, xceiverCount_); - } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - output.writeUInt32(8, failedVolumes_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, registration_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, capacity_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(3, dfsUsed_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(4, remaining_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(5, blockPoolUsed_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(6, xmitsInProgress_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(7, xceiverCount_); - } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(8, failedVolumes_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto) obj; - - boolean result = true; - result = result && (hasRegistration() == other.hasRegistration()); - if (hasRegistration()) { - result = result && getRegistration() - .equals(other.getRegistration()); - } - result = result && (hasCapacity() == other.hasCapacity()); - if (hasCapacity()) { - result = result && (getCapacity() - == other.getCapacity()); - } - result = result && (hasDfsUsed() == other.hasDfsUsed()); - if (hasDfsUsed()) { - result = result && (getDfsUsed() - == other.getDfsUsed()); - } - result = result && (hasRemaining() == other.hasRemaining()); - if (hasRemaining()) { - result = result && (getRemaining() - == other.getRemaining()); - } - result = result && (hasBlockPoolUsed() == other.hasBlockPoolUsed()); - if (hasBlockPoolUsed()) { - result = result && (getBlockPoolUsed() - == other.getBlockPoolUsed()); - } - result = result && (hasXmitsInProgress() == other.hasXmitsInProgress()); - if (hasXmitsInProgress()) { - result = result && (getXmitsInProgress() - == other.getXmitsInProgress()); - } - result = result && (hasXceiverCount() == other.hasXceiverCount()); - if (hasXceiverCount()) { - result = result && (getXceiverCount() - == other.getXceiverCount()); - } - result = result && (hasFailedVolumes() == other.hasFailedVolumes()); - if 
(hasFailedVolumes()) { - result = result && (getFailedVolumes() - == other.getFailedVolumes()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasRegistration()) { - hash = (37 * hash) + REGISTRATION_FIELD_NUMBER; - hash = (53 * hash) + getRegistration().hashCode(); - } - if (hasCapacity()) { - hash = (37 * hash) + CAPACITY_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getCapacity()); - } - if (hasDfsUsed()) { - hash = (37 * hash) + DFSUSED_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getDfsUsed()); - } - if (hasRemaining()) { - hash = (37 * hash) + REMAINING_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getRemaining()); - } - if (hasBlockPoolUsed()) { - hash = (37 * hash) + BLOCKPOOLUSED_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getBlockPoolUsed()); - } - if (hasXmitsInProgress()) { - hash = (37 * hash) + XMITSINPROGRESS_FIELD_NUMBER; - hash = (53 * hash) + getXmitsInProgress(); - } - if (hasXceiverCount()) { - hash = (37 * hash) + XCEIVERCOUNT_FIELD_NUMBER; - hash = (53 * hash) + getXceiverCount(); - } - if (hasFailedVolumes()) { - hash = (37 * hash) + FAILEDVOLUMES_FIELD_NUMBER; - hash = (53 * hash) + getFailedVolumes(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_HeartbeatRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_HeartbeatRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getRegistrationFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (registrationBuilder_ == null) { - registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - } else { - registrationBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - capacity_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - dfsUsed_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - remaining_ = 0L; - bitField0_ = (bitField0_ & ~0x00000008); - blockPoolUsed_ = 0L; - bitField0_ = (bitField0_ & ~0x00000010); - xmitsInProgress_ = 0; - bitField0_ = (bitField0_ & ~0x00000020); - xceiverCount_ = 0; - bitField0_ = (bitField0_ & ~0x00000040); - failedVolumes_ = 0; - bitField0_ = (bitField0_ & ~0x00000080); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public
com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (registrationBuilder_ == null) { - result.registration_ = registration_; - } else { - result.registration_ = registrationBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.capacity_ = capacity_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.dfsUsed_ = dfsUsed_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.remaining_ = remaining_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.blockPoolUsed_ = blockPoolUsed_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; - } - result.xmitsInProgress_ = xmitsInProgress_; - if (((from_bitField0_ & 0x00000040) == 0x00000040)) { - to_bitField0_ |= 0x00000040; - } - result.xceiverCount_ = xceiverCount_; - if (((from_bitField0_ & 0x00000080) == 0x00000080)) { - to_bitField0_ |= 0x00000080; - } - result.failedVolumes_ = failedVolumes_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.getDefaultInstance()) return this; - if (other.hasRegistration()) { - mergeRegistration(other.getRegistration()); - } - if (other.hasCapacity()) { - setCapacity(other.getCapacity()); - } - if (other.hasDfsUsed()) { - 
setDfsUsed(other.getDfsUsed()); - } - if (other.hasRemaining()) { - setRemaining(other.getRemaining()); - } - if (other.hasBlockPoolUsed()) { - setBlockPoolUsed(other.getBlockPoolUsed()); - } - if (other.hasXmitsInProgress()) { - setXmitsInProgress(other.getXmitsInProgress()); - } - if (other.hasXceiverCount()) { - setXceiverCount(other.getXceiverCount()); - } - if (other.hasFailedVolumes()) { - setFailedVolumes(other.getFailedVolumes()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasRegistration()) { - - return false; - } - if (!hasCapacity()) { - - return false; - } - if (!hasDfsUsed()) { - - return false; - } - if (!hasRemaining()) { - - return false; - } - if (!hasBlockPoolUsed()) { - - return false; - } - if (!hasXmitsInProgress()) { - - return false; - } - if (!hasXceiverCount()) { - - return false; - } - if (!hasFailedVolumes()) { - - return false; - } - if (!getRegistration().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(); - if (hasRegistration()) { - subBuilder.mergeFrom(getRegistration()); - } - input.readMessage(subBuilder, extensionRegistry); - setRegistration(subBuilder.buildPartial()); - break; - } - case 16: { - bitField0_ |= 0x00000002; - capacity_ = input.readUInt64(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - dfsUsed_ = input.readUInt64(); - break; - } - case 32: { - bitField0_ |= 0x00000008; - remaining_ = input.readUInt64(); - break; - } - case 40: { - bitField0_ |= 0x00000010; - blockPoolUsed_ = input.readUInt64(); - break; - } - case 48: { - bitField0_ |= 0x00000020; - xmitsInProgress_ = input.readUInt32(); - break; - } - case 56: { - bitField0_ |= 0x00000040; - xceiverCount_ = input.readUInt32(); - break; - } - case 64: { - bitField0_ |= 0x00000080; - failedVolumes_ = input.readUInt32(); - break; - } - } - } - } - - private int bitField0_; - - // required .DatanodeRegistrationProto registration = 1; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_; - public boolean hasRegistration() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() { - if (registrationBuilder_ == null) { - return registration_; - } else { - return registrationBuilder_.getMessage(); - } - } - public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) { - if (registrationBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - registration_ = value; - onChanged(); - } else { - registrationBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setRegistration( - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) { - if (registrationBuilder_ == null) { - registration_ = builderForValue.build(); - onChanged(); - } else { - registrationBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) { - if (registrationBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) { - registration_ = - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial(); - } else { - registration_ = value; - } - onChanged(); - } else { - registrationBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearRegistration() { - if (registrationBuilder_ == null) { - registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - onChanged(); - } else { - registrationBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getRegistrationFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { - if (registrationBuilder_ != null) { - return registrationBuilder_.getMessageOrBuilder(); - } else { - return registration_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> - getRegistrationFieldBuilder() { - if (registrationBuilder_ == null) { - registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>( - registration_, - getParentForChildren(), - isClean()); - registration_ = null; - } - return registrationBuilder_; - } - - // required uint64 capacity = 2; - private long capacity_ ; - public boolean hasCapacity() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getCapacity() { - return 
capacity_; - } - public Builder setCapacity(long value) { - bitField0_ |= 0x00000002; - capacity_ = value; - onChanged(); - return this; - } - public Builder clearCapacity() { - bitField0_ = (bitField0_ & ~0x00000002); - capacity_ = 0L; - onChanged(); - return this; - } - - // required uint64 dfsUsed = 3; - private long dfsUsed_ ; - public boolean hasDfsUsed() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getDfsUsed() { - return dfsUsed_; - } - public Builder setDfsUsed(long value) { - bitField0_ |= 0x00000004; - dfsUsed_ = value; - onChanged(); - return this; - } - public Builder clearDfsUsed() { - bitField0_ = (bitField0_ & ~0x00000004); - dfsUsed_ = 0L; - onChanged(); - return this; - } - - // required uint64 remaining = 4; - private long remaining_ ; - public boolean hasRemaining() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public long getRemaining() { - return remaining_; - } - public Builder setRemaining(long value) { - bitField0_ |= 0x00000008; - remaining_ = value; - onChanged(); - return this; - } - public Builder clearRemaining() { - bitField0_ = (bitField0_ & ~0x00000008); - remaining_ = 0L; - onChanged(); - return this; - } - - // required uint64 blockPoolUsed = 5; - private long blockPoolUsed_ ; - public boolean hasBlockPoolUsed() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - public long getBlockPoolUsed() { - return blockPoolUsed_; - } - public Builder setBlockPoolUsed(long value) { - bitField0_ |= 0x00000010; - blockPoolUsed_ = value; - onChanged(); - return this; - } - public Builder clearBlockPoolUsed() { - bitField0_ = (bitField0_ & ~0x00000010); - blockPoolUsed_ = 0L; - onChanged(); - return this; - } - - // required uint32 xmitsInProgress = 6; - private int xmitsInProgress_ ; - public boolean hasXmitsInProgress() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - public int getXmitsInProgress() { - return xmitsInProgress_; - } - public Builder setXmitsInProgress(int value) { - bitField0_ |= 0x00000020; - xmitsInProgress_ = value; - onChanged(); - return this; - } - public Builder clearXmitsInProgress() { - bitField0_ = (bitField0_ & ~0x00000020); - xmitsInProgress_ = 0; - onChanged(); - return this; - } - - // required uint32 xceiverCount = 7; - private int xceiverCount_ ; - public boolean hasXceiverCount() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - public int getXceiverCount() { - return xceiverCount_; - } - public Builder setXceiverCount(int value) { - bitField0_ |= 0x00000040; - xceiverCount_ = value; - onChanged(); - return this; - } - public Builder clearXceiverCount() { - bitField0_ = (bitField0_ & ~0x00000040); - xceiverCount_ = 0; - onChanged(); - return this; - } - - // required uint32 failedVolumes = 8; - private int failedVolumes_ ; - public boolean hasFailedVolumes() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - public int getFailedVolumes() { - return failedVolumes_; - } - public Builder setFailedVolumes(int value) { - bitField0_ |= 0x00000080; - failedVolumes_ = value; - onChanged(); - return this; - } - public Builder clearFailedVolumes() { - bitField0_ = (bitField0_ & ~0x00000080); - failedVolumes_ = 0; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:HeartbeatRequestProto) - } - - static { - defaultInstance = new HeartbeatRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:HeartbeatRequestProto) - } - - public interface HeartbeatResponseProtoOrBuilder - extends 
com.google.protobuf.MessageOrBuilder { - - // repeated .DatanodeCommandProto cmds = 1; - java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> - getCmdsList(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmds(int index); - int getCmdsCount(); - java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder> - getCmdsOrBuilderList(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdsOrBuilder( - int index); - } - public static final class HeartbeatResponseProto extends - com.google.protobuf.GeneratedMessage - implements HeartbeatResponseProtoOrBuilder { - // Use HeartbeatResponseProto.newBuilder() to construct. - private HeartbeatResponseProto(Builder builder) { - super(builder); - } - private HeartbeatResponseProto(boolean noInit) {} - - private static final HeartbeatResponseProto defaultInstance; - public static HeartbeatResponseProto getDefaultInstance() { - return defaultInstance; - } - - public HeartbeatResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_HeartbeatResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_HeartbeatResponseProto_fieldAccessorTable; - } - - // repeated .DatanodeCommandProto cmds = 1; - public static final int CMDS_FIELD_NUMBER = 1; - private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> cmds_; - public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> getCmdsList() { - return cmds_; - } - public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder> - getCmdsOrBuilderList() { - return cmds_; - } - public int getCmdsCount() { - return cmds_.size(); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmds(int index) { - return cmds_.get(index); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdsOrBuilder( - int index) { - return cmds_.get(index); - } - - private void initFields() { - cmds_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - for (int i = 0; i < getCmdsCount(); i++) { - if (!getCmds(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < cmds_.size(); i++) { - output.writeMessage(1, cmds_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - for (int i = 0; i < cmds_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, cmds_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto) obj; - - boolean result = true; - result = result && getCmdsList() - .equals(other.getCmdsList()); - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getCmdsCount() > 0) { - hash = (37 * hash) + CMDS_FIELD_NUMBER; - hash = (53 * hash) + getCmdsList().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_HeartbeatResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_HeartbeatResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getCmdsFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (cmdsBuilder_ == null) { - cmds_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - } else { - cmdsBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto(this); - int from_bitField0_ = bitField0_; - if (cmdsBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - cmds_ = java.util.Collections.unmodifiableList(cmds_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.cmds_ = cmds_; - } else { - result.cmds_ = cmdsBuilder_.build(); - } - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance()) return this; - if (cmdsBuilder_ == null) { - if (!other.cmds_.isEmpty()) { - if (cmds_.isEmpty()) { - cmds_ = other.cmds_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureCmdsIsMutable(); - cmds_.addAll(other.cmds_); - } - onChanged(); - } - } else { - if (!other.cmds_.isEmpty()) { - if (cmdsBuilder_.isEmpty()) { - cmdsBuilder_.dispose(); - cmdsBuilder_ = null; - cmds_ = other.cmds_; - bitField0_ = (bitField0_ & ~0x00000001); - cmdsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getCmdsFieldBuilder() : null; - } else { - cmdsBuilder_.addAllMessages(other.cmds_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - for (int i = 0; i < getCmdsCount(); i++) { - if (!getCmds(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addCmds(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // repeated .DatanodeCommandProto cmds = 1; - private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> cmds_ = - java.util.Collections.emptyList(); - private void ensureCmdsIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - cmds_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto>(cmds_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto,
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder> cmdsBuilder_; - - public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> getCmdsList() { - if (cmdsBuilder_ == null) { - return java.util.Collections.unmodifiableList(cmds_); - } else { - return cmdsBuilder_.getMessageList(); - } - } - public int getCmdsCount() { - if (cmdsBuilder_ == null) { - return cmds_.size(); - } else { - return cmdsBuilder_.getCount(); - } - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmds(int index) { - if (cmdsBuilder_ == null) { - return cmds_.get(index); - } else { - return cmdsBuilder_.getMessage(index); - } - } - public Builder setCmds( - int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) { - if (cmdsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureCmdsIsMutable(); - cmds_.set(index, value); - onChanged(); - } else { - cmdsBuilder_.setMessage(index, value); - } - return this; - } - public Builder setCmds( - int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder builderForValue) { - if (cmdsBuilder_ == null) { - ensureCmdsIsMutable(); - cmds_.set(index, builderForValue.build()); - onChanged(); - } else { - cmdsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - public Builder addCmds(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) { - if (cmdsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureCmdsIsMutable(); - cmds_.add(value); - onChanged(); - } else { - cmdsBuilder_.addMessage(value); - } - return this; - } - public Builder addCmds( - int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) { - if (cmdsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureCmdsIsMutable(); - cmds_.add(index, value); - onChanged(); - } else { - cmdsBuilder_.addMessage(index, value); - } - return this; - } - public Builder addCmds( - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder builderForValue) { - if (cmdsBuilder_ == null) { - ensureCmdsIsMutable(); - cmds_.add(builderForValue.build()); - onChanged(); - } else { - cmdsBuilder_.addMessage(builderForValue.build()); - } - return this; - } - public Builder addCmds( - int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder builderForValue) { - if (cmdsBuilder_ == null) { - ensureCmdsIsMutable(); - cmds_.add(index, builderForValue.build()); - onChanged(); - } else { - cmdsBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - public Builder addAllCmds( - java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> values) { - if (cmdsBuilder_ == null) { - ensureCmdsIsMutable(); - super.addAll(values, cmds_); - onChanged(); - } else { - cmdsBuilder_.addAllMessages(values); - } - return this; - } - public Builder clearCmds() { - if (cmdsBuilder_ == null) { - cmds_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - cmdsBuilder_.clear(); - } - return this; - } - public Builder removeCmds(int index) { - if (cmdsBuilder_ == null) { - ensureCmdsIsMutable(); - cmds_.remove(index); - onChanged(); - } else { - cmdsBuilder_.remove(index); - } - return this; - } - public
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder getCmdsBuilder( - int index) { - return getCmdsFieldBuilder().getBuilder(index); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdsOrBuilder( - int index) { - if (cmdsBuilder_ == null) { - return cmds_.get(index); } else { - return cmdsBuilder_.getMessageOrBuilder(index); - } - } - public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder> - getCmdsOrBuilderList() { - if (cmdsBuilder_ != null) { - return cmdsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(cmds_); - } - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder addCmdsBuilder() { - return getCmdsFieldBuilder().addBuilder( - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance()); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder addCmdsBuilder( - int index) { - return getCmdsFieldBuilder().addBuilder( - index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance()); - } - public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder> - getCmdsBuilderList() { - return getCmdsFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder> - getCmdsFieldBuilder() { - if (cmdsBuilder_ == null) { - cmdsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>( - cmds_, - ((bitField0_ & 0x00000001) == 0x00000001), - getParentForChildren(), - isClean()); - cmds_ = null; - } - return cmdsBuilder_; - } - - // @@protoc_insertion_point(builder_scope:HeartbeatResponseProto) - } - - static { - defaultInstance = new HeartbeatResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:HeartbeatResponseProto) - } - - public interface BlockReportRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .DatanodeRegistrationProto registration = 1; - boolean hasRegistration(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder(); - - // required string blockPoolId = 2; - boolean hasBlockPoolId(); - String getBlockPoolId(); - - // repeated uint64 blocks = 3 [packed = true]; - java.util.List<java.lang.Long> getBlocksList(); - int getBlocksCount(); - long getBlocks(int index); - } - public static final class BlockReportRequestProto extends - com.google.protobuf.GeneratedMessage - implements BlockReportRequestProtoOrBuilder { - // Use BlockReportRequestProto.newBuilder() to construct.
- private BlockReportRequestProto(Builder builder) { - super(builder); - } - private BlockReportRequestProto(boolean noInit) {} - - private static final BlockReportRequestProto defaultInstance; - public static BlockReportRequestProto getDefaultInstance() { - return defaultInstance; - } - - public BlockReportRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReportRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReportRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required .DatanodeRegistrationProto registration = 1; - public static final int REGISTRATION_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_; - public boolean hasRegistration() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() { - return registration_; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { - return registration_; - } - - // required string blockPoolId = 2; - public static final int BLOCKPOOLID_FIELD_NUMBER = 2; - private java.lang.Object blockPoolId_; - public boolean hasBlockPoolId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getBlockPoolId() { - java.lang.Object ref = blockPoolId_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - blockPoolId_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getBlockPoolIdBytes() { - java.lang.Object ref = blockPoolId_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - blockPoolId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // repeated uint64 blocks = 3 [packed = true]; - public static final int BLOCKS_FIELD_NUMBER = 3; - private java.util.List<java.lang.Long> blocks_; - public java.util.List<java.lang.Long> - getBlocksList() { - return blocks_; - } - public int getBlocksCount() { - return blocks_.size(); - } - public long getBlocks(int index) { - return blocks_.get(index); - } - private int blocksMemoizedSerializedSize = -1; - - private void initFields() { - registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - blockPoolId_ = ""; - blocks_ = java.util.Collections.emptyList();; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasRegistration()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasBlockPoolId()) { - memoizedIsInitialized = 0; - return false; - } - if (!getRegistration().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream
output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, registration_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getBlockPoolIdBytes()); - } - if (getBlocksList().size() > 0) { - output.writeRawVarint32(26); - output.writeRawVarint32(blocksMemoizedSerializedSize); - } - for (int i = 0; i < blocks_.size(); i++) { - output.writeUInt64NoTag(blocks_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, registration_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getBlockPoolIdBytes()); - } - { - int dataSize = 0; - for (int i = 0; i < blocks_.size(); i++) { - dataSize += com.google.protobuf.CodedOutputStream - .computeUInt64SizeNoTag(blocks_.get(i)); - } - size += dataSize; - if (!getBlocksList().isEmpty()) { - size += 1; - size += com.google.protobuf.CodedOutputStream - .computeInt32SizeNoTag(dataSize); - } - blocksMemoizedSerializedSize = dataSize; - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto) obj; - - boolean result = true; - result = result && (hasRegistration() == other.hasRegistration()); - if (hasRegistration()) { - result = result && getRegistration() - .equals(other.getRegistration()); - } - result = result && (hasBlockPoolId() == other.hasBlockPoolId()); - if (hasBlockPoolId()) { - result = result && getBlockPoolId() - .equals(other.getBlockPoolId()); - } - result = result && getBlocksList() - .equals(other.getBlocksList()); - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasRegistration()) { - hash = (37 * hash) + REGISTRATION_FIELD_NUMBER; - hash = (53 * hash) + getRegistration().hashCode(); - } - if (hasBlockPoolId()) { - hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER; - hash = (53 * hash) + getBlockPoolId().hashCode(); - } - if (getBlocksCount() > 0) { - hash = (37 * hash) + BLOCKS_FIELD_NUMBER; - hash = (53 * hash) + getBlocksList().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProtoOrBuilder { - public static final 
com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReportRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReportRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getRegistrationFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (registrationBuilder_ == null) { - registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - } else { - registrationBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - blockPoolId_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - blocks_ = java.util.Collections.emptyList();; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (registrationBuilder_ == null) { - result.registration_ = registration_; - } else { - result.registration_ = registrationBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.blockPoolId_ = blockPoolId_; - if (((bitField0_ & 0x00000004) == 0x00000004)) { - blocks_ = java.util.Collections.unmodifiableList(blocks_); - bitField0_ = 
(bitField0_ & ~0x00000004); - } - result.blocks_ = blocks_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.getDefaultInstance()) return this; - if (other.hasRegistration()) { - mergeRegistration(other.getRegistration()); - } - if (other.hasBlockPoolId()) { - setBlockPoolId(other.getBlockPoolId()); - } - if (!other.blocks_.isEmpty()) { - if (blocks_.isEmpty()) { - blocks_ = other.blocks_; - bitField0_ = (bitField0_ & ~0x00000004); - } else { - ensureBlocksIsMutable(); - blocks_.addAll(other.blocks_); - } - onChanged(); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasRegistration()) { - - return false; - } - if (!hasBlockPoolId()) { - - return false; - } - if (!getRegistration().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(); - if (hasRegistration()) { - subBuilder.mergeFrom(getRegistration()); - } - input.readMessage(subBuilder, extensionRegistry); - setRegistration(subBuilder.buildPartial()); - break; - } - case 18: { - bitField0_ |= 0x00000002; - blockPoolId_ = input.readBytes(); - break; - } - case 24: { - ensureBlocksIsMutable(); - blocks_.add(input.readUInt64()); - break; - } - case 26: { - int length = input.readRawVarint32(); - int limit = input.pushLimit(length); - while (input.getBytesUntilLimit() > 0) { - addBlocks(input.readUInt64()); - } - input.popLimit(limit); - break; - } - } - } - } - - private int bitField0_; - - // required .DatanodeRegistrationProto registration = 1; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_; - public 
boolean hasRegistration() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() { - if (registrationBuilder_ == null) { - return registration_; - } else { - return registrationBuilder_.getMessage(); - } - } - public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) { - if (registrationBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - registration_ = value; - onChanged(); - } else { - registrationBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setRegistration( - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) { - if (registrationBuilder_ == null) { - registration_ = builderForValue.build(); - onChanged(); - } else { - registrationBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) { - if (registrationBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) { - registration_ = - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial(); - } else { - registration_ = value; - } - onChanged(); - } else { - registrationBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearRegistration() { - if (registrationBuilder_ == null) { - registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - onChanged(); - } else { - registrationBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getRegistrationFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { - if (registrationBuilder_ != null) { - return registrationBuilder_.getMessageOrBuilder(); - } else { - return registration_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> - getRegistrationFieldBuilder() { - if (registrationBuilder_ == null) { - registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>( - registration_, - getParentForChildren(), - isClean()); - registration_ = null; - } - return registrationBuilder_; - } - - // required string blockPoolId = 2; - private java.lang.Object blockPoolId_ = ""; - public boolean 
hasBlockPoolId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getBlockPoolId() { - java.lang.Object ref = blockPoolId_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - blockPoolId_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setBlockPoolId(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - blockPoolId_ = value; - onChanged(); - return this; - } - public Builder clearBlockPoolId() { - bitField0_ = (bitField0_ & ~0x00000002); - blockPoolId_ = getDefaultInstance().getBlockPoolId(); - onChanged(); - return this; - } - void setBlockPoolId(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000002; - blockPoolId_ = value; - onChanged(); - } - - // repeated uint64 blocks = 3 [packed = true]; - private java.util.List blocks_ = java.util.Collections.emptyList();; - private void ensureBlocksIsMutable() { - if (!((bitField0_ & 0x00000004) == 0x00000004)) { - blocks_ = new java.util.ArrayList(blocks_); - bitField0_ |= 0x00000004; - } - } - public java.util.List - getBlocksList() { - return java.util.Collections.unmodifiableList(blocks_); - } - public int getBlocksCount() { - return blocks_.size(); - } - public long getBlocks(int index) { - return blocks_.get(index); - } - public Builder setBlocks( - int index, long value) { - ensureBlocksIsMutable(); - blocks_.set(index, value); - onChanged(); - return this; - } - public Builder addBlocks(long value) { - ensureBlocksIsMutable(); - blocks_.add(value); - onChanged(); - return this; - } - public Builder addAllBlocks( - java.lang.Iterable values) { - ensureBlocksIsMutable(); - super.addAll(values, blocks_); - onChanged(); - return this; - } - public Builder clearBlocks() { - blocks_ = java.util.Collections.emptyList();; - bitField0_ = (bitField0_ & ~0x00000004); - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:BlockReportRequestProto) - } - - static { - defaultInstance = new BlockReportRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:BlockReportRequestProto) - } - - public interface BlockReportResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .DatanodeCommandProto cmd = 1; - boolean hasCmd(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmd(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdOrBuilder(); - } - public static final class BlockReportResponseProto extends - com.google.protobuf.GeneratedMessage - implements BlockReportResponseProtoOrBuilder { - // Use BlockReportResponseProto.newBuilder() to construct. 
- private BlockReportResponseProto(Builder builder) { - super(builder); - } - private BlockReportResponseProto(boolean noInit) {} - - private static final BlockReportResponseProto defaultInstance; - public static BlockReportResponseProto getDefaultInstance() { - return defaultInstance; - } - - public BlockReportResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReportResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReportResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required .DatanodeCommandProto cmd = 1; - public static final int CMD_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto cmd_; - public boolean hasCmd() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmd() { - return cmd_; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdOrBuilder() { - return cmd_; - } - - private void initFields() { - cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasCmd()) { - memoizedIsInitialized = 0; - return false; - } - if (!getCmd().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, cmd_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, cmd_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto) obj; - - boolean result = true; - result = result && (hasCmd() == other.hasCmd()); - if (hasCmd()) { - result = result && getCmd() - .equals(other.getCmd()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash 
= 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasCmd()) { - hash = (37 * hash) + CMD_FIELD_NUMBER; - hash = (53 * hash) + getCmd().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto 
prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReportResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReportResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getCmdFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (cmdBuilder_ == null) { - cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance(); - } else { - cmdBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (cmdBuilder_ == null) { - result.cmd_ = cmd_; - } 
else { - result.cmd_ = cmdBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance()) return this; - if (other.hasCmd()) { - mergeCmd(other.getCmd()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasCmd()) { - - return false; - } - if (!getCmd().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.newBuilder(); - if (hasCmd()) { - subBuilder.mergeFrom(getCmd()); - } - input.readMessage(subBuilder, extensionRegistry); - setCmd(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .DatanodeCommandProto cmd = 1; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder> cmdBuilder_; - public boolean hasCmd() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmd() { - if (cmdBuilder_ == null) { - return cmd_; - } else { - return cmdBuilder_.getMessage(); - } - } - public Builder setCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) { - if (cmdBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - cmd_ = value; - onChanged(); - } else { - cmdBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setCmd( - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder builderForValue) { - if (cmdBuilder_ == null) { - cmd_ = builderForValue.build(); - onChanged(); - } else { - cmdBuilder_.setMessage(builderForValue.build()); - } 
- bitField0_ |= 0x00000001; - return this; - } - public Builder mergeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) { - if (cmdBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - cmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance()) { - cmd_ = - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.newBuilder(cmd_).mergeFrom(value).buildPartial(); - } else { - cmd_ = value; - } - onChanged(); - } else { - cmdBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearCmd() { - if (cmdBuilder_ == null) { - cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance(); - onChanged(); - } else { - cmdBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder getCmdBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getCmdFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdOrBuilder() { - if (cmdBuilder_ != null) { - return cmdBuilder_.getMessageOrBuilder(); - } else { - return cmd_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder> - getCmdFieldBuilder() { - if (cmdBuilder_ == null) { - cmdBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>( - cmd_, - getParentForChildren(), - isClean()); - cmd_ = null; - } - return cmdBuilder_; - } - - // @@protoc_insertion_point(builder_scope:BlockReportResponseProto) - } - - static { - defaultInstance = new BlockReportResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:BlockReportResponseProto) - } - - public interface ReceivedDeletedBlockInfoProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .BlockProto block = 1; - boolean hasBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder(); - - // optional string deleteHint = 2; - boolean hasDeleteHint(); - String getDeleteHint(); - } - public static final class ReceivedDeletedBlockInfoProto extends - com.google.protobuf.GeneratedMessage - implements ReceivedDeletedBlockInfoProtoOrBuilder { - // Use ReceivedDeletedBlockInfoProto.newBuilder() to construct. 
- private ReceivedDeletedBlockInfoProto(Builder builder) { - super(builder); - } - private ReceivedDeletedBlockInfoProto(boolean noInit) {} - - private static final ReceivedDeletedBlockInfoProto defaultInstance; - public static ReceivedDeletedBlockInfoProto getDefaultInstance() { - return defaultInstance; - } - - public ReceivedDeletedBlockInfoProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReceivedDeletedBlockInfoProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReceivedDeletedBlockInfoProto_fieldAccessorTable; - } - - private int bitField0_; - // required .BlockProto block = 1; - public static final int BLOCK_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock() { - return block_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder() { - return block_; - } - - // optional string deleteHint = 2; - public static final int DELETEHINT_FIELD_NUMBER = 2; - private java.lang.Object deleteHint_; - public boolean hasDeleteHint() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getDeleteHint() { - java.lang.Object ref = deleteHint_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - deleteHint_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getDeleteHintBytes() { - java.lang.Object ref = deleteHint_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - deleteHint_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); - deleteHint_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBlock()) { - memoizedIsInitialized = 0; - return false; - } - if (!getBlock().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, block_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getDeleteHintBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, block_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += 
com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getDeleteHintBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto) obj; - - boolean result = true; - result = result && (hasBlock() == other.hasBlock()); - if (hasBlock()) { - result = result && getBlock() - .equals(other.getBlock()); - } - result = result && (hasDeleteHint() == other.hasDeleteHint()); - if (hasDeleteHint()) { - result = result && getDeleteHint() - .equals(other.getDeleteHint()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBlock()) { - hash = (37 * hash) + BLOCK_FIELD_NUMBER; - hash = (53 * hash) + getBlock().hashCode(); - } - if (hasDeleteHint()) { - hash = (37 * hash) + DELETEHINT_FIELD_NUMBER; - hash = (53 * hash) + getDeleteHint().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReceivedDeletedBlockInfoProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReceivedDeletedBlockInfoProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBlockFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - deleteHint_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - 
return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (blockBuilder_ == null) { - result.block_ = block_; - } else { - result.block_ = blockBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.deleteHint_ = deleteHint_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDefaultInstance()) return this; - if (other.hasBlock()) { - mergeBlock(other.getBlock()); - } - if (other.hasDeleteHint()) { - setDeleteHint(other.getDeleteHint()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBlock()) { - - return false; - } - if (!getBlock().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - 
onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(); - if (hasBlock()) { - subBuilder.mergeFrom(getBlock()); - } - input.readMessage(subBuilder, extensionRegistry); - setBlock(subBuilder.buildPartial()); - break; - } - case 18: { - bitField0_ |= 0x00000002; - deleteHint_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required .BlockProto block = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> blockBuilder_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock() { - if (blockBuilder_ == null) { - return block_; - } else { - return blockBuilder_.getMessage(); - } - } - public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) { - if (blockBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - block_ = value; - onChanged(); - } else { - blockBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setBlock( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) { - if (blockBuilder_ == null) { - block_ = builderForValue.build(); - onChanged(); - } else { - blockBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) { - if (blockBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) { - block_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); - } else { - block_ = value; - } - onChanged(); - } else { - blockBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearBlock() { - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); - onChanged(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getBlockBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getBlockFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder() { - if (blockBuilder_ != null) { - return blockBuilder_.getMessageOrBuilder(); - } else { - return block_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> - getBlockFieldBuilder() { - if (blockBuilder_ == null) { - blockBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>( - block_, - getParentForChildren(), - isClean()); - block_ = null; - } - return blockBuilder_; - } - - // optional string deleteHint = 2; - private java.lang.Object deleteHint_ = ""; - public boolean hasDeleteHint() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getDeleteHint() { - java.lang.Object ref = deleteHint_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - deleteHint_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setDeleteHint(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - deleteHint_ = value; - onChanged(); - return this; - } - public Builder clearDeleteHint() { - bitField0_ = (bitField0_ & ~0x00000002); - deleteHint_ = getDefaultInstance().getDeleteHint(); - onChanged(); - return this; - } - void setDeleteHint(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000002; - deleteHint_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:ReceivedDeletedBlockInfoProto) - } - - static { - defaultInstance = new ReceivedDeletedBlockInfoProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ReceivedDeletedBlockInfoProto) - } - - public interface BlockReceivedAndDeletedRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .DatanodeRegistrationProto registration = 1; - boolean hasRegistration(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder(); - - // required string blockPoolId = 2; - boolean hasBlockPoolId(); - String getBlockPoolId(); - - // repeated .ReceivedDeletedBlockInfoProto blocks = 3; - java.util.List - getBlocksList(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto getBlocks(int index); - int getBlocksCount(); - java.util.List - getBlocksOrBuilderList(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder getBlocksOrBuilder( - int index); - } - public static final class BlockReceivedAndDeletedRequestProto extends - com.google.protobuf.GeneratedMessage - implements BlockReceivedAndDeletedRequestProtoOrBuilder { - // Use BlockReceivedAndDeletedRequestProto.newBuilder() to construct. 
- private BlockReceivedAndDeletedRequestProto(Builder builder) { - super(builder); - } - private BlockReceivedAndDeletedRequestProto(boolean noInit) {} - - private static final BlockReceivedAndDeletedRequestProto defaultInstance; - public static BlockReceivedAndDeletedRequestProto getDefaultInstance() { - return defaultInstance; - } - - public BlockReceivedAndDeletedRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReceivedAndDeletedRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReceivedAndDeletedRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required .DatanodeRegistrationProto registration = 1; - public static final int REGISTRATION_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_; - public boolean hasRegistration() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() { - return registration_; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { - return registration_; - } - - // required string blockPoolId = 2; - public static final int BLOCKPOOLID_FIELD_NUMBER = 2; - private java.lang.Object blockPoolId_; - public boolean hasBlockPoolId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getBlockPoolId() { - java.lang.Object ref = blockPoolId_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - blockPoolId_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getBlockPoolIdBytes() { - java.lang.Object ref = blockPoolId_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - blockPoolId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // repeated .ReceivedDeletedBlockInfoProto blocks = 3; - public static final int BLOCKS_FIELD_NUMBER = 3; - private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto> blocks_; - public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto> getBlocksList() { - return blocks_; - } - public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder> - getBlocksOrBuilderList() { - return blocks_; - } - public int getBlocksCount() { - return blocks_.size(); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto getBlocks(int index) { - return blocks_.get(index); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder getBlocksOrBuilder( - int index) { - return blocks_.get(index); - } - - private void initFields() { - registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - blockPoolId_ = ""; - blocks_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return
isInitialized == 1; - - if (!hasRegistration()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasBlockPoolId()) { - memoizedIsInitialized = 0; - return false; - } - if (!getRegistration().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getBlocksCount(); i++) { - if (!getBlocks(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, registration_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getBlockPoolIdBytes()); - } - for (int i = 0; i < blocks_.size(); i++) { - output.writeMessage(3, blocks_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, registration_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getBlockPoolIdBytes()); - } - for (int i = 0; i < blocks_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, blocks_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto) obj; - - boolean result = true; - result = result && (hasRegistration() == other.hasRegistration()); - if (hasRegistration()) { - result = result && getRegistration() - .equals(other.getRegistration()); - } - result = result && (hasBlockPoolId() == other.hasBlockPoolId()); - if (hasBlockPoolId()) { - result = result && getBlockPoolId() - .equals(other.getBlockPoolId()); - } - result = result && getBlocksList() - .equals(other.getBlocksList()); - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasRegistration()) { - hash = (37 * hash) + REGISTRATION_FIELD_NUMBER; - hash = (53 * hash) + getRegistration().hashCode(); - } - if (hasBlockPoolId()) { - hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER; - hash = (53 * hash) + getBlockPoolId().hashCode(); - } - if (getBlocksCount() > 0) { - hash = (37 * hash) + BLOCKS_FIELD_NUMBER; - hash = (53 * hash) + getBlocksList().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom( - 
com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - 
Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReceivedAndDeletedRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReceivedAndDeletedRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getRegistrationFieldBuilder(); - getBlocksFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (registrationBuilder_ == null) { - registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - } else { - registrationBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - blockPoolId_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - if (blocksBuilder_ == null) { - blocks_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); - } else { - blocksBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto(this); - int 
from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (registrationBuilder_ == null) { - result.registration_ = registration_; - } else { - result.registration_ = registrationBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.blockPoolId_ = blockPoolId_; - if (blocksBuilder_ == null) { - if (((bitField0_ & 0x00000004) == 0x00000004)) { - blocks_ = java.util.Collections.unmodifiableList(blocks_); - bitField0_ = (bitField0_ & ~0x00000004); - } - result.blocks_ = blocks_; - } else { - result.blocks_ = blocksBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.getDefaultInstance()) return this; - if (other.hasRegistration()) { - mergeRegistration(other.getRegistration()); - } - if (other.hasBlockPoolId()) { - setBlockPoolId(other.getBlockPoolId()); - } - if (blocksBuilder_ == null) { - if (!other.blocks_.isEmpty()) { - if (blocks_.isEmpty()) { - blocks_ = other.blocks_; - bitField0_ = (bitField0_ & ~0x00000004); - } else { - ensureBlocksIsMutable(); - blocks_.addAll(other.blocks_); - } - onChanged(); - } - } else { - if (!other.blocks_.isEmpty()) { - if (blocksBuilder_.isEmpty()) { - blocksBuilder_.dispose(); - blocksBuilder_ = null; - blocks_ = other.blocks_; - bitField0_ = (bitField0_ & ~0x00000004); - blocksBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getBlocksFieldBuilder() : null; - } else { - blocksBuilder_.addAllMessages(other.blocks_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasRegistration()) { - - return false; - } - if (!hasBlockPoolId()) { - - return false; - } - if (!getRegistration().isInitialized()) { - - return false; - } - for (int i = 0; i < getBlocksCount(); i++) { - if (!getBlocks(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(); - if (hasRegistration()) { - subBuilder.mergeFrom(getRegistration()); - } - input.readMessage(subBuilder, extensionRegistry); - setRegistration(subBuilder.buildPartial()); - break; - } - case 18: { - bitField0_ |= 0x00000002; - blockPoolId_ = input.readBytes(); - break; - } - case 26: { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addBlocks(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .DatanodeRegistrationProto registration = 1; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_; - public boolean hasRegistration() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() { - if (registrationBuilder_ == null) { - return registration_; - } else { - return registrationBuilder_.getMessage(); - } - } - public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) { - if (registrationBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - registration_ = value; - onChanged(); - } else { - registrationBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setRegistration( - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) { - if 
(registrationBuilder_ == null) { - registration_ = builderForValue.build(); - onChanged(); - } else { - registrationBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) { - if (registrationBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) { - registration_ = - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial(); - } else { - registration_ = value; - } - onChanged(); - } else { - registrationBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearRegistration() { - if (registrationBuilder_ == null) { - registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - onChanged(); - } else { - registrationBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getRegistrationFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { - if (registrationBuilder_ != null) { - return registrationBuilder_.getMessageOrBuilder(); - } else { - return registration_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> - getRegistrationFieldBuilder() { - if (registrationBuilder_ == null) { - registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>( - registration_, - getParentForChildren(), - isClean()); - registration_ = null; - } - return registrationBuilder_; - } - - // required string blockPoolId = 2; - private java.lang.Object blockPoolId_ = ""; - public boolean hasBlockPoolId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getBlockPoolId() { - java.lang.Object ref = blockPoolId_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - blockPoolId_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setBlockPoolId(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - blockPoolId_ = value; - onChanged(); - return this; - } - public Builder clearBlockPoolId() { - bitField0_ = (bitField0_ & ~0x00000002); - blockPoolId_ = getDefaultInstance().getBlockPoolId(); - onChanged(); - return this; - } - void setBlockPoolId(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000002; - blockPoolId_ = value; - onChanged(); - } - - // repeated 
.ReceivedDeletedBlockInfoProto blocks = 3; - private java.util.List blocks_ = - java.util.Collections.emptyList(); - private void ensureBlocksIsMutable() { - if (!((bitField0_ & 0x00000004) == 0x00000004)) { - blocks_ = new java.util.ArrayList(blocks_); - bitField0_ |= 0x00000004; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder> blocksBuilder_; - - public java.util.List getBlocksList() { - if (blocksBuilder_ == null) { - return java.util.Collections.unmodifiableList(blocks_); - } else { - return blocksBuilder_.getMessageList(); - } - } - public int getBlocksCount() { - if (blocksBuilder_ == null) { - return blocks_.size(); - } else { - return blocksBuilder_.getCount(); - } - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto getBlocks(int index) { - if (blocksBuilder_ == null) { - return blocks_.get(index); - } else { - return blocksBuilder_.getMessage(index); - } - } - public Builder setBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto value) { - if (blocksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureBlocksIsMutable(); - blocks_.set(index, value); - onChanged(); - } else { - blocksBuilder_.setMessage(index, value); - } - return this; - } - public Builder setBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder builderForValue) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.set(index, builderForValue.build()); - onChanged(); - } else { - blocksBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto value) { - if (blocksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureBlocksIsMutable(); - blocks_.add(value); - onChanged(); - } else { - blocksBuilder_.addMessage(value); - } - return this; - } - public Builder addBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto value) { - if (blocksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureBlocksIsMutable(); - blocks_.add(index, value); - onChanged(); - } else { - blocksBuilder_.addMessage(index, value); - } - return this; - } - public Builder addBlocks( - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder builderForValue) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.add(builderForValue.build()); - onChanged(); - } else { - blocksBuilder_.addMessage(builderForValue.build()); - } - return this; - } - public Builder addBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder builderForValue) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.add(index, builderForValue.build()); - onChanged(); - } else { - blocksBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - public Builder addAllBlocks( - java.lang.Iterable values) { - if (blocksBuilder_ == 
null) { - ensureBlocksIsMutable(); - super.addAll(values, blocks_); - onChanged(); - } else { - blocksBuilder_.addAllMessages(values); - } - return this; - } - public Builder clearBlocks() { - if (blocksBuilder_ == null) { - blocks_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); - onChanged(); - } else { - blocksBuilder_.clear(); - } - return this; - } - public Builder removeBlocks(int index) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.remove(index); - onChanged(); - } else { - blocksBuilder_.remove(index); - } - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder getBlocksBuilder( - int index) { - return getBlocksFieldBuilder().getBuilder(index); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder getBlocksOrBuilder( - int index) { - if (blocksBuilder_ == null) { - return blocks_.get(index); - } else { - return blocksBuilder_.getMessageOrBuilder(index); - } - } - public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder> - getBlocksOrBuilderList() { - if (blocksBuilder_ != null) { - return blocksBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(blocks_); - } - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder addBlocksBuilder() { - return getBlocksFieldBuilder().addBuilder( - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDefaultInstance()); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder addBlocksBuilder( - int index) { - return getBlocksFieldBuilder().addBuilder( - index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDefaultInstance()); - } - public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder> - getBlocksBuilderList() { - return getBlocksFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder> - getBlocksFieldBuilder() { - if (blocksBuilder_ == null) { - blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder>( - blocks_, - ((bitField0_ & 0x00000004) == 0x00000004), - getParentForChildren(), - isClean()); - blocks_ = null; - } - return blocksBuilder_; - } - - // @@protoc_insertion_point(builder_scope:BlockReceivedAndDeletedRequestProto) - } - - static { - defaultInstance = new BlockReceivedAndDeletedRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:BlockReceivedAndDeletedRequestProto) - } - - public interface BlockReceivedAndDeletedResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class BlockReceivedAndDeletedResponseProto extends - com.google.protobuf.GeneratedMessage - implements BlockReceivedAndDeletedResponseProtoOrBuilder { - // Use BlockReceivedAndDeletedResponseProto.newBuilder() to
construct. - private BlockReceivedAndDeletedResponseProto(Builder builder) { - super(builder); - } - private BlockReceivedAndDeletedResponseProto(boolean noInit) {} - - private static final BlockReceivedAndDeletedResponseProto defaultInstance; - public static BlockReceivedAndDeletedResponseProto getDefaultInstance() { - return defaultInstance; - } - - public BlockReceivedAndDeletedResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReceivedAndDeletedResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReceivedAndDeletedResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - 
public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReceivedAndDeletedResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReceivedAndDeletedResponseProto_fieldAccessorTable; - } - - // Construct using 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if 
(!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:BlockReceivedAndDeletedResponseProto) - } - - static { - defaultInstance = new BlockReceivedAndDeletedResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:BlockReceivedAndDeletedResponseProto) - } - - public interface ErrorReportRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .DatanodeRegistrationProto registartion = 1; - boolean hasRegistartion(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistartion(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistartionOrBuilder(); - - // required uint32 errorCode = 2; - boolean hasErrorCode(); - int getErrorCode(); - - // required string msg = 3; - boolean hasMsg(); - String getMsg(); - } - public static final class ErrorReportRequestProto extends - com.google.protobuf.GeneratedMessage - implements ErrorReportRequestProtoOrBuilder { - // Use ErrorReportRequestProto.newBuilder() to construct. - private ErrorReportRequestProto(Builder builder) { - super(builder); - } - private ErrorReportRequestProto(boolean noInit) {} - - private static final ErrorReportRequestProto defaultInstance; - public static ErrorReportRequestProto getDefaultInstance() { - return defaultInstance; - } - - public ErrorReportRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ErrorReportRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ErrorReportRequestProto_fieldAccessorTable; - } - - public enum ErrorCode - implements com.google.protobuf.ProtocolMessageEnum { - NOTIFY(0, 0), - DISK_ERROR(1, 1), - INVALID_BLOCK(2, 2), - FATAL_DISK_ERROR(3, 3), - ; - - public static final int NOTIFY_VALUE = 0; - public static final int DISK_ERROR_VALUE = 1; - public static final int INVALID_BLOCK_VALUE = 2; - public static final int FATAL_DISK_ERROR_VALUE = 3; - - - public final int getNumber() { return value; } - - public static ErrorCode valueOf(int value) { - switch (value) { - case 0: return NOTIFY; - case 1: return DISK_ERROR; - case 2: return INVALID_BLOCK; - case 3: return FATAL_DISK_ERROR; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap<ErrorCode> - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap<ErrorCode> - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap<ErrorCode>() { - public ErrorCode findValueByNumber(int number) { - return ErrorCode.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDescriptor().getEnumTypes().get(0); - } - - private static final ErrorCode[] VALUES = { - NOTIFY, DISK_ERROR, INVALID_BLOCK, FATAL_DISK_ERROR, - }; - - public static ErrorCode valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private ErrorCode(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:ErrorReportRequestProto.ErrorCode) - } - - private int bitField0_; - // required .DatanodeRegistrationProto registartion = 1; - public static final int REGISTARTION_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registartion_; - public boolean hasRegistartion() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistartion() { - return registartion_; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistartionOrBuilder() { - return registartion_; - } - - // required uint32 errorCode = 2; - public static final int ERRORCODE_FIELD_NUMBER = 2; - private int errorCode_; - public boolean hasErrorCode() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public int getErrorCode() { - return errorCode_; - } - - // required string msg = 3; - public static final int MSG_FIELD_NUMBER = 3; - private java.lang.Object msg_; - public boolean hasMsg() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public String getMsg() { - java.lang.Object ref = msg_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - msg_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getMsgBytes() { - java.lang.Object ref = msg_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - msg_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - registartion_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - errorCode_ = 0; - msg_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasRegistartion()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasErrorCode()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasMsg()) { - memoizedIsInitialized = 0; - return false; - } - if (!getRegistartion().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, registartion_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt32(2, errorCode_); - } - if 
(((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, getMsgBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, registartion_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(2, errorCode_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getMsgBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto) obj; - - boolean result = true; - result = result && (hasRegistartion() == other.hasRegistartion()); - if (hasRegistartion()) { - result = result && getRegistartion() - .equals(other.getRegistartion()); - } - result = result && (hasErrorCode() == other.hasErrorCode()); - if (hasErrorCode()) { - result = result && (getErrorCode() - == other.getErrorCode()); - } - result = result && (hasMsg() == other.hasMsg()); - if (hasMsg()) { - result = result && getMsg() - .equals(other.getMsg()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasRegistartion()) { - hash = (37 * hash) + REGISTARTION_FIELD_NUMBER; - hash = (53 * hash) + getRegistartion().hashCode(); - } - if (hasErrorCode()) { - hash = (37 * hash) + ERRORCODE_FIELD_NUMBER; - hash = (53 * hash) + getErrorCode(); - } - if (hasMsg()) { - hash = (37 * hash) + MSG_FIELD_NUMBER; - hash = (53 * hash) + getMsg().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ErrorReportRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ErrorReportRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - 
} - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getRegistartionFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (registartionBuilder_ == null) { - registartion_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - } else { - registartionBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - errorCode_ = 0; - bitField0_ = (bitField0_ & ~0x00000002); - msg_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (registartionBuilder_ == null) { - result.registartion_ = registartion_; - } else { - result.registartion_ = registartionBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.errorCode_ = errorCode_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.msg_ = msg_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance()) 
return this; - if (other.hasRegistartion()) { - mergeRegistartion(other.getRegistartion()); - } - if (other.hasErrorCode()) { - setErrorCode(other.getErrorCode()); - } - if (other.hasMsg()) { - setMsg(other.getMsg()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasRegistartion()) { - - return false; - } - if (!hasErrorCode()) { - - return false; - } - if (!hasMsg()) { - - return false; - } - if (!getRegistartion().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(); - if (hasRegistartion()) { - subBuilder.mergeFrom(getRegistartion()); - } - input.readMessage(subBuilder, extensionRegistry); - setRegistartion(subBuilder.buildPartial()); - break; - } - case 16: { - bitField0_ |= 0x00000002; - errorCode_ = input.readUInt32(); - break; - } - case 26: { - bitField0_ |= 0x00000004; - msg_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required .DatanodeRegistrationProto registartion = 1; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registartion_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registartionBuilder_; - public boolean hasRegistartion() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistartion() { - if (registartionBuilder_ == null) { - return registartion_; - } else { - return registartionBuilder_.getMessage(); - } - } - public Builder setRegistartion(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) { - if (registartionBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - registartion_ = value; - onChanged(); - } else { - registartionBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setRegistartion( - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) { - if (registartionBuilder_ == null) { - registartion_ = builderForValue.build(); - onChanged(); - } else { - registartionBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public 
Builder mergeRegistartion(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) { - if (registartionBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - registartion_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) { - registartion_ = - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registartion_).mergeFrom(value).buildPartial(); - } else { - registartion_ = value; - } - onChanged(); - } else { - registartionBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearRegistartion() { - if (registartionBuilder_ == null) { - registartion_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance(); - onChanged(); - } else { - registartionBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistartionBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getRegistartionFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistartionOrBuilder() { - if (registartionBuilder_ != null) { - return registartionBuilder_.getMessageOrBuilder(); - } else { - return registartion_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> - getRegistartionFieldBuilder() { - if (registartionBuilder_ == null) { - registartionBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>( - registartion_, - getParentForChildren(), - isClean()); - registartion_ = null; - } - return registartionBuilder_; - } - - // required uint32 errorCode = 2; - private int errorCode_ ; - public boolean hasErrorCode() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public int getErrorCode() { - return errorCode_; - } - public Builder setErrorCode(int value) { - bitField0_ |= 0x00000002; - errorCode_ = value; - onChanged(); - return this; - } - public Builder clearErrorCode() { - bitField0_ = (bitField0_ & ~0x00000002); - errorCode_ = 0; - onChanged(); - return this; - } - - // required string msg = 3; - private java.lang.Object msg_ = ""; - public boolean hasMsg() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public String getMsg() { - java.lang.Object ref = msg_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - msg_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setMsg(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - msg_ = value; - onChanged(); - return this; - } - public Builder clearMsg() { - bitField0_ = (bitField0_ & ~0x00000004); - msg_ = getDefaultInstance().getMsg(); - onChanged(); - return this; - } - void 
setMsg(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000004; - msg_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:ErrorReportRequestProto) - } - - static { - defaultInstance = new ErrorReportRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ErrorReportRequestProto) - } - - public interface ErrorReportResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class ErrorReportResponseProto extends - com.google.protobuf.GeneratedMessage - implements ErrorReportResponseProtoOrBuilder { - // Use ErrorReportResponseProto.newBuilder() to construct. - private ErrorReportResponseProto(Builder builder) { - super(builder); - } - private ErrorReportResponseProto(boolean noInit) {} - - private static final ErrorReportResponseProto defaultInstance; - public static ErrorReportResponseProto getDefaultInstance() { - return defaultInstance; - } - - public ErrorReportResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ErrorReportResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ErrorReportResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProtoOrBuilder { - public static final
com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ErrorReportResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ErrorReportResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - 
com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:ErrorReportResponseProto) - } - - static { - defaultInstance = new ErrorReportResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ErrorReportResponseProto) - } - - public interface ProcessUpgradeRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional .UpgradeCommandProto cmd = 1; - boolean hasCmd(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getCmd(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getCmdOrBuilder(); - } - public static final class ProcessUpgradeRequestProto extends - com.google.protobuf.GeneratedMessage - implements ProcessUpgradeRequestProtoOrBuilder { - // Use ProcessUpgradeRequestProto.newBuilder() to construct. - private ProcessUpgradeRequestProto(Builder builder) { - super(builder); - } - private ProcessUpgradeRequestProto(boolean noInit) {} - - private static final ProcessUpgradeRequestProto defaultInstance; - public static ProcessUpgradeRequestProto getDefaultInstance() { - return defaultInstance; - } - - public ProcessUpgradeRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ProcessUpgradeRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ProcessUpgradeRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // optional .UpgradeCommandProto cmd = 1; - public static final int CMD_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto cmd_; - public boolean hasCmd() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getCmd() { - return cmd_; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getCmdOrBuilder() { - return cmd_; - } - - private void initFields() { - cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (hasCmd()) { - if (!getCmd().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, cmd_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int 
getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, cmd_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto) obj; - - boolean result = true; - result = result && (hasCmd() == other.hasCmd()); - if (hasCmd()) { - result = result && getCmd() - .equals(other.getCmd()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasCmd()) { - hash = (37 * hash) + CMD_FIELD_NUMBER; - hash = (53 * hash) + getCmd().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if 
(builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ProcessUpgradeRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ProcessUpgradeRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getCmdFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (cmdBuilder_ == null) { - cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance(); - } else { - cmdBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto getDefaultInstanceForType() { - return
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (cmdBuilder_ == null) { - result.cmd_ = cmd_; - } else { - result.cmd_ = cmdBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto.getDefaultInstance()) return this; - if (other.hasCmd()) { - mergeCmd(other.getCmd()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (hasCmd()) { - if (!getCmd().isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.newBuilder(); - if (hasCmd()) { - subBuilder.mergeFrom(getCmd()); - } - input.readMessage(subBuilder, extensionRegistry); - setCmd(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // optional .UpgradeCommandProto cmd = 1; - private 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder> cmdBuilder_; - public boolean hasCmd() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getCmd() { - if (cmdBuilder_ == null) { - return cmd_; - } else { - return cmdBuilder_.getMessage(); - } - } - public Builder setCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto value) { - if (cmdBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - cmd_ = value; - onChanged(); - } else { - cmdBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setCmd( - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder builderForValue) { - if (cmdBuilder_ == null) { - cmd_ = builderForValue.build(); - onChanged(); - } else { - cmdBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto value) { - if (cmdBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - cmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance()) { - cmd_ = - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.newBuilder(cmd_).mergeFrom(value).buildPartial(); - } else { - cmd_ = value; - } - onChanged(); - } else { - cmdBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearCmd() { - if (cmdBuilder_ == null) { - cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance(); - onChanged(); - } else { - cmdBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder getCmdBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getCmdFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getCmdOrBuilder() { - if (cmdBuilder_ != null) { - return cmdBuilder_.getMessageOrBuilder(); - } else { - return cmd_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder> - getCmdFieldBuilder() { - if (cmdBuilder_ == null) { - cmdBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder>( - cmd_, - getParentForChildren(), - isClean()); - cmd_ = null; - } - return 
cmdBuilder_; - } - - // @@protoc_insertion_point(builder_scope:ProcessUpgradeRequestProto) - } - - static { - defaultInstance = new ProcessUpgradeRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ProcessUpgradeRequestProto) - } - - public interface ProcessUpgradeResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional .UpgradeCommandProto cmd = 1; - boolean hasCmd(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getCmd(); - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getCmdOrBuilder(); - } - public static final class ProcessUpgradeResponseProto extends - com.google.protobuf.GeneratedMessage - implements ProcessUpgradeResponseProtoOrBuilder { - // Use ProcessUpgradeResponseProto.newBuilder() to construct. - private ProcessUpgradeResponseProto(Builder builder) { - super(builder); - } - private ProcessUpgradeResponseProto(boolean noInit) {} - - private static final ProcessUpgradeResponseProto defaultInstance; - public static ProcessUpgradeResponseProto getDefaultInstance() { - return defaultInstance; - } - - public ProcessUpgradeResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ProcessUpgradeResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ProcessUpgradeResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // optional .UpgradeCommandProto cmd = 1; - public static final int CMD_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto cmd_; - public boolean hasCmd() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getCmd() { - return cmd_; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getCmdOrBuilder() { - return cmd_; - } - - private void initFields() { - cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (hasCmd()) { - if (!getCmd().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, cmd_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, cmd_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object 
writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto) obj; - - boolean result = true; - result = result && (hasCmd() == other.hasCmd()); - if (hasCmd()) { - result = result && getCmd() - .equals(other.getCmd()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasCmd()) { - hash = (37 * hash) + CMD_FIELD_NUMBER; - hash = (53 * hash) + getCmd().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - 
return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ProcessUpgradeResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ProcessUpgradeResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getCmdFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (cmdBuilder_ == null) { - cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance(); - } else { - cmdBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (cmdBuilder_ == null) { - result.cmd_ = cmd_; - } else { - result.cmd_ = cmdBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.getDefaultInstance()) return this; - if (other.hasCmd()) { - mergeCmd(other.getCmd()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (hasCmd()) { - if (!getCmd().isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.newBuilder(); - if (hasCmd()) { - subBuilder.mergeFrom(getCmd()); - } - input.readMessage(subBuilder, extensionRegistry); - setCmd(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // optional .UpgradeCommandProto cmd = 1; - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder> cmdBuilder_; - public boolean hasCmd() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getCmd() { - if (cmdBuilder_ == null) { - return cmd_; - } else { - return cmdBuilder_.getMessage(); - } - } - public Builder setCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto value) { - if (cmdBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - cmd_ = value; - onChanged(); - } else { - cmdBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setCmd( - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder builderForValue) { - if (cmdBuilder_ == null) { - cmd_ = builderForValue.build(); - onChanged(); - } else { - cmdBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto value) { - if (cmdBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - cmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance()) { - cmd_ = - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.newBuilder(cmd_).mergeFrom(value).buildPartial(); - } else { - cmd_ = value; - } - onChanged(); - } else { - cmdBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearCmd() { - if (cmdBuilder_ == null) { - cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance(); - onChanged(); - } else { - cmdBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder getCmdBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getCmdFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getCmdOrBuilder() { - if (cmdBuilder_ != null) { - return cmdBuilder_.getMessageOrBuilder(); - } else { - return cmd_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder> - getCmdFieldBuilder() { - if (cmdBuilder_ == null) { - cmdBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder>( - cmd_, - getParentForChildren(), - isClean()); - cmd_ = null; - } - return cmdBuilder_; - } - - // @@protoc_insertion_point(builder_scope:ProcessUpgradeResponseProto) - } - - static { - defaultInstance = new ProcessUpgradeResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ProcessUpgradeResponseProto) - } - - public interface ReportBadBlocksRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // repeated .LocatedBlockProto 
blocks = 1; - java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> - getBlocksList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index); - int getBlocksCount(); - java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> - getBlocksOrBuilderList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder( - int index); - } - public static final class ReportBadBlocksRequestProto extends - com.google.protobuf.GeneratedMessage - implements ReportBadBlocksRequestProtoOrBuilder { - // Use ReportBadBlocksRequestProto.newBuilder() to construct. - private ReportBadBlocksRequestProto(Builder builder) { - super(builder); - } - private ReportBadBlocksRequestProto(boolean noInit) {} - - private static final ReportBadBlocksRequestProto defaultInstance; - public static ReportBadBlocksRequestProto getDefaultInstance() { - return defaultInstance; - } - - public ReportBadBlocksRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReportBadBlocksRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReportBadBlocksRequestProto_fieldAccessorTable; - } - - // repeated .LocatedBlockProto blocks = 1; - public static final int BLOCKS_FIELD_NUMBER = 1; - private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> blocks_; - public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> getBlocksList() { - return blocks_; - } - public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> - getBlocksOrBuilderList() { - return blocks_; - } - public int getBlocksCount() { - return blocks_.size(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) { - return blocks_.get(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder( - int index) { - return blocks_.get(index); - } - - private void initFields() { - blocks_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - for (int i = 0; i < getBlocksCount(); i++) { - if (!getBlocks(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < blocks_.size(); i++) { - output.writeMessage(1, blocks_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - for (int i = 0; i < blocks_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, blocks_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto) obj; - - boolean result = true; - result = result && getBlocksList() - .equals(other.getBlocksList()); - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getBlocksCount() > 0) { - hash = (37 * hash) + BLOCKS_FIELD_NUMBER; - hash = (53 * hash) + getBlocksList().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return 
newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReportBadBlocksRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReportBadBlocksRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBlocksFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (blocksBuilder_ == null) { - blocks_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - } else { - blocksBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw 
newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto(this); - int from_bitField0_ = bitField0_; - if (blocksBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - blocks_ = java.util.Collections.unmodifiableList(blocks_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.blocks_ = blocks_; - } else { - result.blocks_ = blocksBuilder_.build(); - } - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.getDefaultInstance()) return this; - if (blocksBuilder_ == null) { - if (!other.blocks_.isEmpty()) { - if (blocks_.isEmpty()) { - blocks_ = other.blocks_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureBlocksIsMutable(); - blocks_.addAll(other.blocks_); - } - onChanged(); - } - } else { - if (!other.blocks_.isEmpty()) { - if (blocksBuilder_.isEmpty()) { - blocksBuilder_.dispose(); - blocksBuilder_ = null; - blocks_ = other.blocks_; - bitField0_ = (bitField0_ & ~0x00000001); - blocksBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getBlocksFieldBuilder() : null; - } else { - blocksBuilder_.addAllMessages(other.blocks_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - for (int i = 0; i < getBlocksCount(); i++) { - if (!getBlocks(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addBlocks(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // repeated .LocatedBlockProto blocks = 1; - private java.util.List blocks_ = - java.util.Collections.emptyList(); - private void ensureBlocksIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - blocks_ = new java.util.ArrayList(blocks_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> blocksBuilder_; - - public java.util.List getBlocksList() { - if (blocksBuilder_ == null) { - return java.util.Collections.unmodifiableList(blocks_); - } else { - return blocksBuilder_.getMessageList(); - } - } - public int getBlocksCount() { - if (blocksBuilder_ == null) { - return blocks_.size(); - } else { - return blocksBuilder_.getCount(); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) { - if (blocksBuilder_ == null) { - return blocks_.get(index); - } else { - return blocksBuilder_.getMessage(index); - } - } - public Builder setBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { - if (blocksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureBlocksIsMutable(); - blocks_.set(index, value); - onChanged(); - } else { - blocksBuilder_.setMessage(index, value); - } - return this; - } - public Builder setBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.set(index, builderForValue.build()); - onChanged(); - } else { - blocksBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { - if (blocksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureBlocksIsMutable(); - blocks_.add(value); - onChanged(); - } else { - blocksBuilder_.addMessage(value); - } - return this; - } - public Builder addBlocks( - int 
index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { - if (blocksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureBlocksIsMutable(); - blocks_.add(index, value); - onChanged(); - } else { - blocksBuilder_.addMessage(index, value); - } - return this; - } - public Builder addBlocks( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.add(builderForValue.build()); - onChanged(); - } else { - blocksBuilder_.addMessage(builderForValue.build()); - } - return this; - } - public Builder addBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.add(index, builderForValue.build()); - onChanged(); - } else { - blocksBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - public Builder addAllBlocks( - java.lang.Iterable values) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - super.addAll(values, blocks_); - onChanged(); - } else { - blocksBuilder_.addAllMessages(values); - } - return this; - } - public Builder clearBlocks() { - if (blocksBuilder_ == null) { - blocks_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - blocksBuilder_.clear(); - } - return this; - } - public Builder removeBlocks(int index) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.remove(index); - onChanged(); - } else { - blocksBuilder_.remove(index); - } - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getBlocksBuilder( - int index) { - return getBlocksFieldBuilder().getBuilder(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder( - int index) { - if (blocksBuilder_ == null) { - return blocks_.get(index); } else { - return blocksBuilder_.getMessageOrBuilder(index); - } - } - public java.util.List - getBlocksOrBuilderList() { - if (blocksBuilder_ != null) { - return blocksBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(blocks_); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder() { - return getBlocksFieldBuilder().addBuilder( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder( - int index) { - return getBlocksFieldBuilder().addBuilder( - index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()); - } - public java.util.List - getBlocksBuilderList() { - return getBlocksFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> - getBlocksFieldBuilder() { - if (blocksBuilder_ == null) { - blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>( - blocks_, - ((bitField0_ & 0x00000001) == 0x00000001), - getParentForChildren(), - isClean()); - blocks_ = null; - } - return blocksBuilder_; - } - - // @@protoc_insertion_point(builder_scope:ReportBadBlocksRequestProto) - } - - static { - defaultInstance = new ReportBadBlocksRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ReportBadBlocksRequestProto) - } - - public interface ReportBadBlocksResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class ReportBadBlocksResponseProto extends - com.google.protobuf.GeneratedMessage - implements ReportBadBlocksResponseProtoOrBuilder { - // Use ReportBadBlocksResponseProto.newBuilder() to construct. - private ReportBadBlocksResponseProto(Builder builder) { - super(builder); - } - private ReportBadBlocksResponseProto(boolean noInit) {} - - private static final ReportBadBlocksResponseProto defaultInstance; - public static ReportBadBlocksResponseProto getDefaultInstance() { - return defaultInstance; - } - - public ReportBadBlocksResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReportBadBlocksResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReportBadBlocksResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws 
com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - 
com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReportBadBlocksResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReportBadBlocksResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final 
boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:ReportBadBlocksResponseProto) - } - - static { - defaultInstance = new ReportBadBlocksResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ReportBadBlocksResponseProto) - } - - public interface CommitBlockSynchronizationRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .ExtendedBlockProto block = 1; - boolean hasBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder(); - - // required uint64 newGenStamp = 2; - boolean hasNewGenStamp(); - long getNewGenStamp(); - - // required uint64 newLength = 3; - boolean hasNewLength(); - long getNewLength(); - - // required bool closeFile = 4; - boolean hasCloseFile(); - boolean getCloseFile(); - - // required bool deleteBlock = 5; - boolean hasDeleteBlock(); - boolean getDeleteBlock(); - - // repeated .DatanodeIDProto newTaragets = 6; - java.util.List - getNewTaragetsList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getNewTaragets(int index); - int getNewTaragetsCount(); - java.util.List - getNewTaragetsOrBuilderList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getNewTaragetsOrBuilder( - int index); - } - public static final class CommitBlockSynchronizationRequestProto extends - com.google.protobuf.GeneratedMessage - implements CommitBlockSynchronizationRequestProtoOrBuilder { - // Use CommitBlockSynchronizationRequestProto.newBuilder() to construct. 
- private CommitBlockSynchronizationRequestProto(Builder builder) { - super(builder); - } - private CommitBlockSynchronizationRequestProto(boolean noInit) {} - - private static final CommitBlockSynchronizationRequestProto defaultInstance; - public static CommitBlockSynchronizationRequestProto getDefaultInstance() { - return defaultInstance; - } - - public CommitBlockSynchronizationRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_CommitBlockSynchronizationRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_CommitBlockSynchronizationRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required .ExtendedBlockProto block = 1; - public static final int BLOCK_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() { - return block_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() { - return block_; - } - - // required uint64 newGenStamp = 2; - public static final int NEWGENSTAMP_FIELD_NUMBER = 2; - private long newGenStamp_; - public boolean hasNewGenStamp() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getNewGenStamp() { - return newGenStamp_; - } - - // required uint64 newLength = 3; - public static final int NEWLENGTH_FIELD_NUMBER = 3; - private long newLength_; - public boolean hasNewLength() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getNewLength() { - return newLength_; - } - - // required bool closeFile = 4; - public static final int CLOSEFILE_FIELD_NUMBER = 4; - private boolean closeFile_; - public boolean hasCloseFile() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public boolean getCloseFile() { - return closeFile_; - } - - // required bool deleteBlock = 5; - public static final int DELETEBLOCK_FIELD_NUMBER = 5; - private boolean deleteBlock_; - public boolean hasDeleteBlock() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - public boolean getDeleteBlock() { - return deleteBlock_; - } - - // repeated .DatanodeIDProto newTaragets = 6; - public static final int NEWTARAGETS_FIELD_NUMBER = 6; - private java.util.List newTaragets_; - public java.util.List getNewTaragetsList() { - return newTaragets_; - } - public java.util.List - getNewTaragetsOrBuilderList() { - return newTaragets_; - } - public int getNewTaragetsCount() { - return newTaragets_.size(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getNewTaragets(int index) { - return newTaragets_.get(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getNewTaragetsOrBuilder( - int index) { - return newTaragets_.get(index); - } - - private void initFields() { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - newGenStamp_ = 0L; - newLength_ = 0L; - closeFile_ = false; - deleteBlock_ = false; - newTaragets_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized 
= -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBlock()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasNewGenStamp()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasNewLength()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasCloseFile()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasDeleteBlock()) { - memoizedIsInitialized = 0; - return false; - } - if (!getBlock().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getNewTaragetsCount(); i++) { - if (!getNewTaragets(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, block_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, newGenStamp_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt64(3, newLength_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeBool(4, closeFile_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeBool(5, deleteBlock_); - } - for (int i = 0; i < newTaragets_.size(); i++) { - output.writeMessage(6, newTaragets_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, block_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, newGenStamp_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(3, newLength_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(4, closeFile_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(5, deleteBlock_); - } - for (int i = 0; i < newTaragets_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(6, newTaragets_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto) obj; - - boolean result = true; - result = result && (hasBlock() == other.hasBlock()); - if (hasBlock()) { - result = result && getBlock() - .equals(other.getBlock()); - } - result = result && (hasNewGenStamp() == 
other.hasNewGenStamp()); - if (hasNewGenStamp()) { - result = result && (getNewGenStamp() - == other.getNewGenStamp()); - } - result = result && (hasNewLength() == other.hasNewLength()); - if (hasNewLength()) { - result = result && (getNewLength() - == other.getNewLength()); - } - result = result && (hasCloseFile() == other.hasCloseFile()); - if (hasCloseFile()) { - result = result && (getCloseFile() - == other.getCloseFile()); - } - result = result && (hasDeleteBlock() == other.hasDeleteBlock()); - if (hasDeleteBlock()) { - result = result && (getDeleteBlock() - == other.getDeleteBlock()); - } - result = result && getNewTaragetsList() - .equals(other.getNewTaragetsList()); - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBlock()) { - hash = (37 * hash) + BLOCK_FIELD_NUMBER; - hash = (53 * hash) + getBlock().hashCode(); - } - if (hasNewGenStamp()) { - hash = (37 * hash) + NEWGENSTAMP_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getNewGenStamp()); - } - if (hasNewLength()) { - hash = (37 * hash) + NEWLENGTH_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getNewLength()); - } - if (hasCloseFile()) { - hash = (37 * hash) + CLOSEFILE_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getCloseFile()); - } - if (hasDeleteBlock()) { - hash = (37 * hash) + DELETEBLOCK_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getDeleteBlock()); - } - if (getNewTaragetsCount() > 0) { - hash = (37 * hash) + NEWTARAGETS_FIELD_NUMBER; - hash = (53 * hash) + getNewTaragetsList().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, 
extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_CommitBlockSynchronizationRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_CommitBlockSynchronizationRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBlockFieldBuilder(); - getNewTaragetsFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - newGenStamp_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - 
newLength_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - closeFile_ = false; - bitField0_ = (bitField0_ & ~0x00000008); - deleteBlock_ = false; - bitField0_ = (bitField0_ & ~0x00000010); - if (newTaragetsBuilder_ == null) { - newTaragets_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000020); - } else { - newTaragetsBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (blockBuilder_ == null) { - result.block_ = block_; - } else { - result.block_ = blockBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.newGenStamp_ = newGenStamp_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.newLength_ = newLength_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.closeFile_ = closeFile_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.deleteBlock_ = deleteBlock_; - if (newTaragetsBuilder_ == null) { - if (((bitField0_ & 0x00000020) == 0x00000020)) { - newTaragets_ = java.util.Collections.unmodifiableList(newTaragets_); - bitField0_ = (bitField0_ & ~0x00000020); - } - result.newTaragets_ = newTaragets_; - } else { - result.newTaragets_ = newTaragetsBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto) { - return 
mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.getDefaultInstance()) return this; - if (other.hasBlock()) { - mergeBlock(other.getBlock()); - } - if (other.hasNewGenStamp()) { - setNewGenStamp(other.getNewGenStamp()); - } - if (other.hasNewLength()) { - setNewLength(other.getNewLength()); - } - if (other.hasCloseFile()) { - setCloseFile(other.getCloseFile()); - } - if (other.hasDeleteBlock()) { - setDeleteBlock(other.getDeleteBlock()); - } - if (newTaragetsBuilder_ == null) { - if (!other.newTaragets_.isEmpty()) { - if (newTaragets_.isEmpty()) { - newTaragets_ = other.newTaragets_; - bitField0_ = (bitField0_ & ~0x00000020); - } else { - ensureNewTaragetsIsMutable(); - newTaragets_.addAll(other.newTaragets_); - } - onChanged(); - } - } else { - if (!other.newTaragets_.isEmpty()) { - if (newTaragetsBuilder_.isEmpty()) { - newTaragetsBuilder_.dispose(); - newTaragetsBuilder_ = null; - newTaragets_ = other.newTaragets_; - bitField0_ = (bitField0_ & ~0x00000020); - newTaragetsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getNewTaragetsFieldBuilder() : null; - } else { - newTaragetsBuilder_.addAllMessages(other.newTaragets_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBlock()) { - - return false; - } - if (!hasNewGenStamp()) { - - return false; - } - if (!hasNewLength()) { - - return false; - } - if (!hasCloseFile()) { - - return false; - } - if (!hasDeleteBlock()) { - - return false; - } - if (!getBlock().isInitialized()) { - - return false; - } - for (int i = 0; i < getNewTaragetsCount(); i++) { - if (!getNewTaragets(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(); - if (hasBlock()) { - subBuilder.mergeFrom(getBlock()); - } - input.readMessage(subBuilder, extensionRegistry); - setBlock(subBuilder.buildPartial()); - break; - } - case 16: { - bitField0_ |= 0x00000002; - newGenStamp_ = input.readUInt64(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - newLength_ = input.readUInt64(); - break; - } - case 32: { - bitField0_ |= 0x00000008; - closeFile_ = input.readBool(); - break; - } - case 40: { - bitField0_ |= 0x00000010; - deleteBlock_ = input.readBool(); - break; - } - case 50: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder 
subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addNewTaragets(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .ExtendedBlockProto block = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() { - if (blockBuilder_ == null) { - return block_; - } else { - return blockBuilder_.getMessage(); - } - } - public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { - if (blockBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - block_ = value; - onChanged(); - } else { - blockBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setBlock( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) { - if (blockBuilder_ == null) { - block_ = builderForValue.build(); - onChanged(); - } else { - blockBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { - if (blockBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) { - block_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); - } else { - block_ = value; - } - onChanged(); - } else { - blockBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearBlock() { - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - onChanged(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getBlockFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() { - if (blockBuilder_ != null) { - return blockBuilder_.getMessageOrBuilder(); - } else { - return block_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> - getBlockFieldBuilder() { - if (blockBuilder_ == null) { - blockBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>( - block_, - getParentForChildren(), - isClean()); - block_ = null; - } - return blockBuilder_; - } - - // required uint64 newGenStamp = 2; - private long newGenStamp_ ; - public boolean hasNewGenStamp() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getNewGenStamp() { - return newGenStamp_; - } - public Builder setNewGenStamp(long value) { - bitField0_ |= 0x00000002; - newGenStamp_ = value; - onChanged(); - return this; - } - public Builder clearNewGenStamp() { - bitField0_ = (bitField0_ & ~0x00000002); - newGenStamp_ = 0L; - onChanged(); - return this; - } - - // required uint64 newLength = 3; - private long newLength_ ; - public boolean hasNewLength() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getNewLength() { - return newLength_; - } - public Builder setNewLength(long value) { - bitField0_ |= 0x00000004; - newLength_ = value; - onChanged(); - return this; - } - public Builder clearNewLength() { - bitField0_ = (bitField0_ & ~0x00000004); - newLength_ = 0L; - onChanged(); - return this; - } - - // required bool closeFile = 4; - private boolean closeFile_ ; - public boolean hasCloseFile() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public boolean getCloseFile() { - return closeFile_; - } - public Builder setCloseFile(boolean value) { - bitField0_ |= 0x00000008; - closeFile_ = value; - onChanged(); - return this; - } - public Builder clearCloseFile() { - bitField0_ = (bitField0_ & ~0x00000008); - closeFile_ = false; - onChanged(); - return this; - } - - // required bool deleteBlock = 5; - private boolean deleteBlock_ ; - public boolean hasDeleteBlock() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - public boolean getDeleteBlock() { - return deleteBlock_; - } - public Builder setDeleteBlock(boolean value) { - bitField0_ |= 0x00000010; - deleteBlock_ = value; - onChanged(); - return this; - } - public Builder clearDeleteBlock() { - bitField0_ = (bitField0_ & ~0x00000010); - deleteBlock_ = false; - onChanged(); - return this; - } - - // repeated .DatanodeIDProto newTaragets = 6; - private java.util.List newTaragets_ = - java.util.Collections.emptyList(); - private void ensureNewTaragetsIsMutable() { - if (!((bitField0_ & 0x00000020) == 0x00000020)) { - newTaragets_ = new java.util.ArrayList(newTaragets_); - bitField0_ |= 0x00000020; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> newTaragetsBuilder_; - - public java.util.List getNewTaragetsList() { - if (newTaragetsBuilder_ == null) { - return java.util.Collections.unmodifiableList(newTaragets_); - } else { - return newTaragetsBuilder_.getMessageList(); - } - } - public int getNewTaragetsCount() { - if (newTaragetsBuilder_ == null) { - return newTaragets_.size(); - } else { - return newTaragetsBuilder_.getCount(); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getNewTaragets(int index) { - if (newTaragetsBuilder_ == null) { - return newTaragets_.get(index); - } else { - return newTaragetsBuilder_.getMessage(index); - } - } - public Builder setNewTaragets( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { - if (newTaragetsBuilder_ == null) { - if (value == null) { - throw new 
NullPointerException(); - } - ensureNewTaragetsIsMutable(); - newTaragets_.set(index, value); - onChanged(); - } else { - newTaragetsBuilder_.setMessage(index, value); - } - return this; - } - public Builder setNewTaragets( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) { - if (newTaragetsBuilder_ == null) { - ensureNewTaragetsIsMutable(); - newTaragets_.set(index, builderForValue.build()); - onChanged(); - } else { - newTaragetsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - public Builder addNewTaragets(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { - if (newTaragetsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureNewTaragetsIsMutable(); - newTaragets_.add(value); - onChanged(); - } else { - newTaragetsBuilder_.addMessage(value); - } - return this; - } - public Builder addNewTaragets( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { - if (newTaragetsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureNewTaragetsIsMutable(); - newTaragets_.add(index, value); - onChanged(); - } else { - newTaragetsBuilder_.addMessage(index, value); - } - return this; - } - public Builder addNewTaragets( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) { - if (newTaragetsBuilder_ == null) { - ensureNewTaragetsIsMutable(); - newTaragets_.add(builderForValue.build()); - onChanged(); - } else { - newTaragetsBuilder_.addMessage(builderForValue.build()); - } - return this; - } - public Builder addNewTaragets( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) { - if (newTaragetsBuilder_ == null) { - ensureNewTaragetsIsMutable(); - newTaragets_.add(index, builderForValue.build()); - onChanged(); - } else { - newTaragetsBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - public Builder addAllNewTaragets( - java.lang.Iterable values) { - if (newTaragetsBuilder_ == null) { - ensureNewTaragetsIsMutable(); - super.addAll(values, newTaragets_); - onChanged(); - } else { - newTaragetsBuilder_.addAllMessages(values); - } - return this; - } - public Builder clearNewTaragets() { - if (newTaragetsBuilder_ == null) { - newTaragets_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000020); - onChanged(); - } else { - newTaragetsBuilder_.clear(); - } - return this; - } - public Builder removeNewTaragets(int index) { - if (newTaragetsBuilder_ == null) { - ensureNewTaragetsIsMutable(); - newTaragets_.remove(index); - onChanged(); - } else { - newTaragetsBuilder_.remove(index); - } - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getNewTaragetsBuilder( - int index) { - return getNewTaragetsFieldBuilder().getBuilder(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getNewTaragetsOrBuilder( - int index) { - if (newTaragetsBuilder_ == null) { - return newTaragets_.get(index); } else { - return newTaragetsBuilder_.getMessageOrBuilder(index); - } - } - public java.util.List - getNewTaragetsOrBuilderList() { - if (newTaragetsBuilder_ != null) { - return newTaragetsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(newTaragets_); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder 
addNewTaragetsBuilder() { - return getNewTaragetsFieldBuilder().addBuilder( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder addNewTaragetsBuilder( - int index) { - return getNewTaragetsFieldBuilder().addBuilder( - index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()); - } - public java.util.List - getNewTaragetsBuilderList() { - return getNewTaragetsFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> - getNewTaragetsFieldBuilder() { - if (newTaragetsBuilder_ == null) { - newTaragetsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>( - newTaragets_, - ((bitField0_ & 0x00000020) == 0x00000020), - getParentForChildren(), - isClean()); - newTaragets_ = null; - } - return newTaragetsBuilder_; - } - - // @@protoc_insertion_point(builder_scope:CommitBlockSynchronizationRequestProto) - } - - static { - defaultInstance = new CommitBlockSynchronizationRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:CommitBlockSynchronizationRequestProto) - } - - public interface CommitBlockSynchronizationResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class CommitBlockSynchronizationResponseProto extends - com.google.protobuf.GeneratedMessage - implements CommitBlockSynchronizationResponseProtoOrBuilder { - // Use CommitBlockSynchronizationResponseProto.newBuilder() to construct. 
- private CommitBlockSynchronizationResponseProto(Builder builder) { - super(builder); - } - private CommitBlockSynchronizationResponseProto(boolean noInit) {} - - private static final CommitBlockSynchronizationResponseProto defaultInstance; - public static CommitBlockSynchronizationResponseProto getDefaultInstance() { - return defaultInstance; - } - - public CommitBlockSynchronizationResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_CommitBlockSynchronizationResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_CommitBlockSynchronizationResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return 
newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_CommitBlockSynchronizationResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_CommitBlockSynchronizationResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - 
com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:CommitBlockSynchronizationResponseProto) - } - - static { - defaultInstance = new CommitBlockSynchronizationResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:CommitBlockSynchronizationResponseProto) - } - - public static abstract class DatanodeProtocolService - implements com.google.protobuf.Service { - protected DatanodeProtocolService() {} - - public interface Interface { - public abstract void registerDatanode( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void sendHeartbeat( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void blockReport( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void blockReceivedAndDeleted( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void errorReport( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void versionRequest( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void processUpgrade( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void reportBadBlocks( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void commitBlockSynchronization( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request, - com.google.protobuf.RpcCallback done); - - } - - public static com.google.protobuf.Service newReflectiveService( - final Interface impl) { - return new DatanodeProtocolService() { - @java.lang.Override - public void registerDatanode( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.registerDatanode(controller, request, done); - } - - @java.lang.Override - public void sendHeartbeat( - com.google.protobuf.RpcController controller, - 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.sendHeartbeat(controller, request, done); - } - - @java.lang.Override - public void blockReport( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.blockReport(controller, request, done); - } - - @java.lang.Override - public void blockReceivedAndDeleted( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.blockReceivedAndDeleted(controller, request, done); - } - - @java.lang.Override - public void errorReport( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.errorReport(controller, request, done); - } - - @java.lang.Override - public void versionRequest( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.versionRequest(controller, request, done); - } - - @java.lang.Override - public void processUpgrade( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.processUpgrade(controller, request, done); - } - - @java.lang.Override - public void reportBadBlocks( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.reportBadBlocks(controller, request, done); - } - - @java.lang.Override - public void commitBlockSynchronization( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.commitBlockSynchronization(controller, request, done); - } - - }; - } - - public static com.google.protobuf.BlockingService - newReflectiveBlockingService(final BlockingInterface impl) { - return new com.google.protobuf.BlockingService() { - public final com.google.protobuf.Descriptors.ServiceDescriptor - getDescriptorForType() { - return getDescriptor(); - } - - public final com.google.protobuf.Message callBlockingMethod( - com.google.protobuf.Descriptors.MethodDescriptor method, - com.google.protobuf.RpcController controller, - com.google.protobuf.Message request) - throws com.google.protobuf.ServiceException { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.callBlockingMethod() given method descriptor for " + - "wrong service type."); - } - switch(method.getIndex()) { - case 0: - return impl.registerDatanode(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto)request); - case 1: - return impl.sendHeartbeat(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto)request); - case 2: - return impl.blockReport(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto)request); - case 3: - return 
impl.blockReceivedAndDeleted(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto)request); - case 4: - return impl.errorReport(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto)request); - case 5: - return impl.versionRequest(controller, (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto)request); - case 6: - return impl.processUpgrade(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto)request); - case 7: - return impl.reportBadBlocks(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto)request); - case 8: - return impl.commitBlockSynchronization(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto)request); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public final com.google.protobuf.Message - getRequestPrototype( - com.google.protobuf.Descriptors.MethodDescriptor method) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.getRequestPrototype() given method " + - "descriptor for wrong service type."); - } - switch(method.getIndex()) { - case 0: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.getDefaultInstance(); - case 1: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.getDefaultInstance(); - case 2: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.getDefaultInstance(); - case 3: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.getDefaultInstance(); - case 4: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance(); - case 5: - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto.getDefaultInstance(); - case 6: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto.getDefaultInstance(); - case 7: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.getDefaultInstance(); - case 8: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.getDefaultInstance(); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public final com.google.protobuf.Message - getResponsePrototype( - com.google.protobuf.Descriptors.MethodDescriptor method) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.getResponsePrototype() given method " + - "descriptor for wrong service type."); - } - switch(method.getIndex()) { - case 0: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance(); - case 1: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance(); - case 2: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance(); - case 3: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance(); - case 4: - return 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance(); - case 5: - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDefaultInstance(); - case 6: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.getDefaultInstance(); - case 7: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance(); - case 8: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance(); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - }; - } - - public abstract void registerDatanode( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void sendHeartbeat( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void blockReport( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void blockReceivedAndDeleted( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void errorReport( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void versionRequest( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void processUpgrade( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void reportBadBlocks( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void commitBlockSynchronization( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request, - com.google.protobuf.RpcCallback done); - - public static final - com.google.protobuf.Descriptors.ServiceDescriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.getDescriptor().getServices().get(0); - } - public final com.google.protobuf.Descriptors.ServiceDescriptor - getDescriptorForType() { - return getDescriptor(); - } - - public final void callMethod( - com.google.protobuf.Descriptors.MethodDescriptor method, - com.google.protobuf.RpcController controller, - com.google.protobuf.Message request, - com.google.protobuf.RpcCallback< - com.google.protobuf.Message> done) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.callMethod() given method descriptor for wrong " + - "service type."); - } - 
switch(method.getIndex()) { - case 0: - this.registerDatanode(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 1: - this.sendHeartbeat(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 2: - this.blockReport(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 3: - this.blockReceivedAndDeleted(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 4: - this.errorReport(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 5: - this.versionRequest(controller, (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 6: - this.processUpgrade(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 7: - this.reportBadBlocks(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 8: - this.commitBlockSynchronization(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public final com.google.protobuf.Message - getRequestPrototype( - com.google.protobuf.Descriptors.MethodDescriptor method) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.getRequestPrototype() given method " + - "descriptor for wrong service type."); - } - switch(method.getIndex()) { - case 0: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.getDefaultInstance(); - case 1: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.getDefaultInstance(); - case 2: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.getDefaultInstance(); - case 3: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.getDefaultInstance(); - case 4: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance(); - case 5: - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto.getDefaultInstance(); - case 6: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto.getDefaultInstance(); - case 7: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.getDefaultInstance(); - case 8: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.getDefaultInstance(); - default: - throw new 
java.lang.AssertionError("Can't get here."); - } - } - - public final com.google.protobuf.Message - getResponsePrototype( - com.google.protobuf.Descriptors.MethodDescriptor method) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.getResponsePrototype() given method " + - "descriptor for wrong service type."); - } - switch(method.getIndex()) { - case 0: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance(); - case 1: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance(); - case 2: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance(); - case 3: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance(); - case 4: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance(); - case 5: - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDefaultInstance(); - case 6: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.getDefaultInstance(); - case 7: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance(); - case 8: - return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance(); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public static Stub newStub( - com.google.protobuf.RpcChannel channel) { - return new Stub(channel); - } - - public static final class Stub extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeProtocolService implements Interface { - private Stub(com.google.protobuf.RpcChannel channel) { - this.channel = channel; - } - - private final com.google.protobuf.RpcChannel channel; - - public com.google.protobuf.RpcChannel getChannel() { - return channel; - } - - public void registerDatanode( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(0), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance())); - } - - public void sendHeartbeat( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(1), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance())); - } - - public void blockReport( - 
com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(2), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance())); - } - - public void blockReceivedAndDeleted( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(3), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance())); - } - - public void errorReport( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(4), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance())); - } - - public void versionRequest( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(5), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDefaultInstance())); - } - - public void processUpgrade( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(6), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.getDefaultInstance())); - } - - public void reportBadBlocks( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto 
request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(7), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance())); - } - - public void commitBlockSynchronization( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(8), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance())); - } - } - - public static BlockingInterface newBlockingStub( - com.google.protobuf.BlockingRpcChannel channel) { - return new BlockingStub(channel); - } - - public interface BlockingInterface { - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto registerDatanode( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto sendHeartbeat( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto blockReport( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto blockReceivedAndDeleted( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto errorReport( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto versionRequest( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto processUpgrade( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto 
request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto reportBadBlocks( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto commitBlockSynchronization( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request) - throws com.google.protobuf.ServiceException; - } - - private static final class BlockingStub implements BlockingInterface { - private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { - this.channel = channel; - } - - private final com.google.protobuf.BlockingRpcChannel channel; - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto registerDatanode( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(0), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto sendHeartbeat( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(1), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto blockReport( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(2), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto blockReceivedAndDeleted( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(3), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance()); - } - - - public 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto errorReport( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(4), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto versionRequest( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(5), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto processUpgrade( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(6), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto reportBadBlocks( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(7), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto commitBlockSynchronization( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(8), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance()); - } - - } - } - - private static com.google.protobuf.Descriptors.Descriptor - internal_static_DatanodeRegistrationProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_DatanodeRegistrationProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_DatanodeCommandProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - 
internal_static_DatanodeCommandProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_BalancerBandwidthCommandProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_BalancerBandwidthCommandProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_BlockCommandProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_BlockCommandProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_BlockRecoveryCommandProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_BlockRecoveryCommandProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_FinalizeCommandProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_FinalizeCommandProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_KeyUpdateCommandProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_KeyUpdateCommandProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RegisterCommandProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RegisterCommandProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_UpgradeCommandProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_UpgradeCommandProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RegisterDatanodeRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RegisterDatanodeRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RegisterDatanodeResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RegisterDatanodeResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_HeartbeatRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_HeartbeatRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_HeartbeatResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_HeartbeatResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_BlockReportRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_BlockReportRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_BlockReportResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_BlockReportResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ReceivedDeletedBlockInfoProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - 
internal_static_ReceivedDeletedBlockInfoProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_BlockReceivedAndDeletedRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_BlockReceivedAndDeletedRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_BlockReceivedAndDeletedResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_BlockReceivedAndDeletedResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ErrorReportRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ErrorReportRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ErrorReportResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ErrorReportResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ProcessUpgradeRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ProcessUpgradeRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ProcessUpgradeResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ProcessUpgradeResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ReportBadBlocksRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ReportBadBlocksRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ReportBadBlocksResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ReportBadBlocksResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_CommitBlockSynchronizationRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_CommitBlockSynchronizationRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_CommitBlockSynchronizationResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_CommitBlockSynchronizationResponseProto_fieldAccessorTable; - - public static com.google.protobuf.Descriptors.FileDescriptor - getDescriptor() { - return descriptor; - } - private static com.google.protobuf.Descriptors.FileDescriptor - descriptor; - static { - java.lang.String[] descriptorData = { - "\n\026DatanodeProtocol.proto\032\nhdfs.proto\"\220\001\n" + - "\031DatanodeRegistrationProto\022$\n\ndatanodeID" + - "\030\001 \002(\0132\020.DatanodeIDProto\022&\n\013storageInfo\030" + - "\002 \002(\0132\021.StorageInfoProto\022%\n\004keys\030\003 \002(\0132\027" + - ".ExportedBlockKeysProto\"\244\004\n\024DatanodeComm" + - "andProto\022+\n\007cmdType\030\001 \002(\0162\032.DatanodeComm" + - "andProto.Type\0223\n\013balancerCmd\030\002 \001(\0132\036.Bal" + - "ancerBandwidthCommandProto\022\"\n\006blkCmd\030\003 \001" + - "(\0132\022.BlockCommandProto\022/\n\013recoveryCmd\030\004 
" + - "\001(\0132\032.BlockRecoveryCommandProto\022*\n\013final", - "izeCmd\030\005 \001(\0132\025.FinalizeCommandProto\022,\n\014k" + - "eyUpdateCmd\030\006 \001(\0132\026.KeyUpdateCommandProt" + - "o\022*\n\013registerCmd\030\007 \001(\0132\025.RegisterCommand" + - "Proto\022(\n\nupgradeCmd\030\010 \001(\0132\024.UpgradeComma" + - "ndProto\"\244\001\n\004Type\022\034\n\030BalancerBandwidthCom" + - "mand\020\000\022\020\n\014BlockCommand\020\001\022\030\n\024BlockRecover" + - "yCommand\020\002\022\023\n\017FinalizeCommand\020\003\022\024\n\020KeyUp" + - "dateCommand\020\004\022\023\n\017RegisterCommand\020\005\022\022\n\016Up" + - "gradeCommand\020\006\"2\n\035BalancerBandwidthComma" + - "ndProto\022\021\n\tbandwidth\030\001 \002(\004\"\276\001\n\021BlockComm", - "andProto\022)\n\006action\030\001 \002(\0162\031.BlockCommandP" + - "roto.Action\022\023\n\013blockPoolId\030\002 \002(\t\022\033\n\006bloc" + - "ks\030\003 \003(\0132\013.BlockProto\022$\n\007targets\030\004 \003(\0132\023" + - ".DatanodeInfosProto\"&\n\006Action\022\014\n\010TRANSFE" + - "R\020\001\022\016\n\nINVALIDATE\020\002\"B\n\031BlockRecoveryComm" + - "andProto\022%\n\006blocks\030\001 \003(\0132\025.RecoveringBlo" + - "ckProto\"+\n\024FinalizeCommandProto\022\023\n\013block" + - "PoolId\030\001 \002(\t\">\n\025KeyUpdateCommandProto\022%\n" + - "\004keys\030\001 \002(\0132\027.ExportedBlockKeysProto\"\026\n\024" + - "RegisterCommandProto\"\247\001\n\023UpgradeCommandP", - "roto\022+\n\006action\030\001 \002(\0162\033.UpgradeCommandPro" + - "to.Action\022\017\n\007version\030\002 \002(\r\022\025\n\rupgradeSta" + - "tus\030\003 \002(\r\";\n\006Action\022\013\n\007UNKNOWN\020\000\022\021\n\rREPO" + - "RT_STATUS\020d\022\021\n\rSTART_UPGRADE\020e\"P\n\034Regist" + - "erDatanodeRequestProto\0220\n\014registration\030\001" + - " \002(\0132\032.DatanodeRegistrationProto\"Q\n\035Regi" + - "sterDatanodeResponseProto\0220\n\014registratio" + - "n\030\001 \002(\0132\032.DatanodeRegistrationProto\"\334\001\n\025" + - "HeartbeatRequestProto\0220\n\014registration\030\001 " + - "\002(\0132\032.DatanodeRegistrationProto\022\020\n\010capac", - "ity\030\002 \002(\004\022\017\n\007dfsUsed\030\003 \002(\004\022\021\n\tremaining\030" + - "\004 \002(\004\022\025\n\rblockPoolUsed\030\005 \002(\004\022\027\n\017xmitsInP" + - "rogress\030\006 \002(\r\022\024\n\014xceiverCount\030\007 \002(\r\022\025\n\rf" + - "ailedVolumes\030\010 \002(\r\"=\n\026HeartbeatResponseP" + - "roto\022#\n\004cmds\030\001 \003(\0132\025.DatanodeCommandProt" + - "o\"t\n\027BlockReportRequestProto\0220\n\014registra" + - "tion\030\001 \002(\0132\032.DatanodeRegistrationProto\022\023" + - "\n\013blockPoolId\030\002 \002(\t\022\022\n\006blocks\030\003 \003(\004B\002\020\001\"" + - ">\n\030BlockReportResponseProto\022\"\n\003cmd\030\001 \002(\013" + - "2\025.DatanodeCommandProto\"O\n\035ReceivedDelet", - "edBlockInfoProto\022\032\n\005block\030\001 \002(\0132\013.BlockP" + - "roto\022\022\n\ndeleteHint\030\002 \001(\t\"\234\001\n#BlockReceiv" + - "edAndDeletedRequestProto\0220\n\014registration" + - "\030\001 \002(\0132\032.DatanodeRegistrationProto\022\023\n\013bl" + - "ockPoolId\030\002 \002(\t\022.\n\006blocks\030\003 \003(\0132\036.Receiv" + - "edDeletedBlockInfoProto\"&\n$BlockReceived" + - "AndDeletedResponseProto\"\275\001\n\027ErrorReportR" + - "equestProto\0220\n\014registartion\030\001 \002(\0132\032.Data" + - "nodeRegistrationProto\022\021\n\terrorCode\030\002 \002(\r" + - "\022\013\n\003msg\030\003 \002(\t\"P\n\tErrorCode\022\n\n\006NOTIFY\020\000\022\016", - 
"\n\nDISK_ERROR\020\001\022\021\n\rINVALID_BLOCK\020\002\022\024\n\020FAT" + - "AL_DISK_ERROR\020\003\"\032\n\030ErrorReportResponsePr" + - "oto\"?\n\032ProcessUpgradeRequestProto\022!\n\003cmd" + - "\030\001 \001(\0132\024.UpgradeCommandProto\"@\n\033ProcessU" + - "pgradeResponseProto\022!\n\003cmd\030\001 \001(\0132\024.Upgra" + - "deCommandProto\"A\n\033ReportBadBlocksRequest" + - "Proto\022\"\n\006blocks\030\001 \003(\0132\022.LocatedBlockProt" + - "o\"\036\n\034ReportBadBlocksResponseProto\"\303\001\n&Co" + - "mmitBlockSynchronizationRequestProto\022\"\n\005" + - "block\030\001 \002(\0132\023.ExtendedBlockProto\022\023\n\013newG", - "enStamp\030\002 \002(\004\022\021\n\tnewLength\030\003 \002(\004\022\021\n\tclos" + - "eFile\030\004 \002(\010\022\023\n\013deleteBlock\030\005 \002(\010\022%\n\013newT" + - "aragets\030\006 \003(\0132\020.DatanodeIDProto\")\n\'Commi" + - "tBlockSynchronizationResponseProto2\353\005\n\027D" + - "atanodeProtocolService\022Q\n\020registerDatano" + - "de\022\035.RegisterDatanodeRequestProto\032\036.Regi" + - "sterDatanodeResponseProto\022@\n\rsendHeartbe" + - "at\022\026.HeartbeatRequestProto\032\027.HeartbeatRe" + - "sponseProto\022B\n\013blockReport\022\030.BlockReport" + - "RequestProto\032\031.BlockReportResponseProto\022", - "f\n\027blockReceivedAndDeleted\022$.BlockReceiv" + - "edAndDeletedRequestProto\032%.BlockReceived" + - "AndDeletedResponseProto\022B\n\013errorReport\022\030" + - ".ErrorReportRequestProto\032\031.ErrorReportRe" + - "sponseProto\022=\n\016versionRequest\022\024.VersionR" + - "equestProto\032\025.VersionResponseProto\022K\n\016pr" + - "ocessUpgrade\022\033.ProcessUpgradeRequestProt" + - "o\032\034.ProcessUpgradeResponseProto\022N\n\017repor" + - "tBadBlocks\022\034.ReportBadBlocksRequestProto" + - "\032\035.ReportBadBlocksResponseProto\022o\n\032commi", - "tBlockSynchronization\022\'.CommitBlockSynch" + - "ronizationRequestProto\032(.CommitBlockSync" + - "hronizationResponseProtoBE\n%org.apache.h" + - "adoop.hdfs.protocol.protoB\026DatanodeProto" + - "colProtos\210\001\001\240\001\001" - }; - com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = - new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { - public com.google.protobuf.ExtensionRegistry assignDescriptors( - com.google.protobuf.Descriptors.FileDescriptor root) { - descriptor = root; - internal_static_DatanodeRegistrationProto_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_DatanodeRegistrationProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_DatanodeRegistrationProto_descriptor, - new java.lang.String[] { "DatanodeID", "StorageInfo", "Keys", }, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder.class); - internal_static_DatanodeCommandProto_descriptor = - getDescriptor().getMessageTypes().get(1); - internal_static_DatanodeCommandProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_DatanodeCommandProto_descriptor, - new java.lang.String[] { "CmdType", "BalancerCmd", "BlkCmd", "RecoveryCmd", "FinalizeCmd", "KeyUpdateCmd", "RegisterCmd", "UpgradeCmd", }, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder.class); 
- internal_static_BalancerBandwidthCommandProto_descriptor = - getDescriptor().getMessageTypes().get(2); - internal_static_BalancerBandwidthCommandProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_BalancerBandwidthCommandProto_descriptor, - new java.lang.String[] { "Bandwidth", }, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder.class); - internal_static_BlockCommandProto_descriptor = - getDescriptor().getMessageTypes().get(3); - internal_static_BlockCommandProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_BlockCommandProto_descriptor, - new java.lang.String[] { "Action", "BlockPoolId", "Blocks", "Targets", }, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder.class); - internal_static_BlockRecoveryCommandProto_descriptor = - getDescriptor().getMessageTypes().get(4); - internal_static_BlockRecoveryCommandProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_BlockRecoveryCommandProto_descriptor, - new java.lang.String[] { "Blocks", }, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder.class); - internal_static_FinalizeCommandProto_descriptor = - getDescriptor().getMessageTypes().get(5); - internal_static_FinalizeCommandProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_FinalizeCommandProto_descriptor, - new java.lang.String[] { "BlockPoolId", }, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder.class); - internal_static_KeyUpdateCommandProto_descriptor = - getDescriptor().getMessageTypes().get(6); - internal_static_KeyUpdateCommandProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_KeyUpdateCommandProto_descriptor, - new java.lang.String[] { "Keys", }, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder.class); - internal_static_RegisterCommandProto_descriptor = - getDescriptor().getMessageTypes().get(7); - internal_static_RegisterCommandProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RegisterCommandProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder.class); - internal_static_UpgradeCommandProto_descriptor = - getDescriptor().getMessageTypes().get(8); - internal_static_UpgradeCommandProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_UpgradeCommandProto_descriptor, - new java.lang.String[] { "Action", "Version", "UpgradeStatus", }, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.class, - 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder.class); - internal_static_RegisterDatanodeRequestProto_descriptor = - getDescriptor().getMessageTypes().get(9); - internal_static_RegisterDatanodeRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RegisterDatanodeRequestProto_descriptor, - new java.lang.String[] { "Registration", }, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.Builder.class); - internal_static_RegisterDatanodeResponseProto_descriptor = - getDescriptor().getMessageTypes().get(10); - internal_static_RegisterDatanodeResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RegisterDatanodeResponseProto_descriptor, - new java.lang.String[] { "Registration", }, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.Builder.class); - internal_static_HeartbeatRequestProto_descriptor = - getDescriptor().getMessageTypes().get(11); - internal_static_HeartbeatRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_HeartbeatRequestProto_descriptor, - new java.lang.String[] { "Registration", "Capacity", "DfsUsed", "Remaining", "BlockPoolUsed", "XmitsInProgress", "XceiverCount", "FailedVolumes", }, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.Builder.class); - internal_static_HeartbeatResponseProto_descriptor = - getDescriptor().getMessageTypes().get(12); - internal_static_HeartbeatResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_HeartbeatResponseProto_descriptor, - new java.lang.String[] { "Cmds", }, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.Builder.class); - internal_static_BlockReportRequestProto_descriptor = - getDescriptor().getMessageTypes().get(13); - internal_static_BlockReportRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_BlockReportRequestProto_descriptor, - new java.lang.String[] { "Registration", "BlockPoolId", "Blocks", }, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.Builder.class); - internal_static_BlockReportResponseProto_descriptor = - getDescriptor().getMessageTypes().get(14); - internal_static_BlockReportResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_BlockReportResponseProto_descriptor, - new java.lang.String[] { "Cmd", }, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.Builder.class); - internal_static_ReceivedDeletedBlockInfoProto_descriptor = - getDescriptor().getMessageTypes().get(15); - 
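Each FieldAccessorTable above binds a message descriptor to its camel-cased field names so that GeneratedMessage can serve reflective field access. A minimal sketch of that reflection path, assuming the pre-removal generated classes (or freshly regenerated ones) are on the classpath; the class name and bandwidth value are illustrative:

    import com.google.protobuf.Descriptors.FieldDescriptor;
    import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto;

    public class ReflectiveReadSketch {           // hypothetical demo class
      public static void main(String[] args) {
        BalancerBandwidthCommandProto cmd = BalancerBandwidthCommandProto.newBuilder()
            .setBandwidth(64L << 20)              // illustrative bandwidth cap
            .build();
        FieldDescriptor f = cmd.getDescriptorForType().findFieldByName("bandwidth");
        long bandwidth = (Long) cmd.getField(f);  // reflective read via the accessor table
        System.out.println(bandwidth);
      }
    }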
internal_static_ReceivedDeletedBlockInfoProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ReceivedDeletedBlockInfoProto_descriptor, - new java.lang.String[] { "Block", "DeleteHint", }, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder.class); - internal_static_BlockReceivedAndDeletedRequestProto_descriptor = - getDescriptor().getMessageTypes().get(16); - internal_static_BlockReceivedAndDeletedRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_BlockReceivedAndDeletedRequestProto_descriptor, - new java.lang.String[] { "Registration", "BlockPoolId", "Blocks", }, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.Builder.class); - internal_static_BlockReceivedAndDeletedResponseProto_descriptor = - getDescriptor().getMessageTypes().get(17); - internal_static_BlockReceivedAndDeletedResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_BlockReceivedAndDeletedResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.Builder.class); - internal_static_ErrorReportRequestProto_descriptor = - getDescriptor().getMessageTypes().get(18); - internal_static_ErrorReportRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ErrorReportRequestProto_descriptor, - new java.lang.String[] { "Registartion", "ErrorCode", "Msg", }, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.Builder.class); - internal_static_ErrorReportResponseProto_descriptor = - getDescriptor().getMessageTypes().get(19); - internal_static_ErrorReportResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ErrorReportResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.Builder.class); - internal_static_ProcessUpgradeRequestProto_descriptor = - getDescriptor().getMessageTypes().get(20); - internal_static_ProcessUpgradeRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ProcessUpgradeRequestProto_descriptor, - new java.lang.String[] { "Cmd", }, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto.Builder.class); - internal_static_ProcessUpgradeResponseProto_descriptor = - getDescriptor().getMessageTypes().get(21); - internal_static_ProcessUpgradeResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ProcessUpgradeResponseProto_descriptor, - new java.lang.String[] { "Cmd", }, - 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.class,
-              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.Builder.class);
-          internal_static_ReportBadBlocksRequestProto_descriptor =
-            getDescriptor().getMessageTypes().get(22);
-          internal_static_ReportBadBlocksRequestProto_fieldAccessorTable = new
-            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_ReportBadBlocksRequestProto_descriptor,
-              new java.lang.String[] { "Blocks", },
-              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.class,
-              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.Builder.class);
-          internal_static_ReportBadBlocksResponseProto_descriptor =
-            getDescriptor().getMessageTypes().get(23);
-          internal_static_ReportBadBlocksResponseProto_fieldAccessorTable = new
-            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_ReportBadBlocksResponseProto_descriptor,
-              new java.lang.String[] { },
-              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.class,
-              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.Builder.class);
-          internal_static_CommitBlockSynchronizationRequestProto_descriptor =
-            getDescriptor().getMessageTypes().get(24);
-          internal_static_CommitBlockSynchronizationRequestProto_fieldAccessorTable = new
-            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_CommitBlockSynchronizationRequestProto_descriptor,
-              new java.lang.String[] { "Block", "NewGenStamp", "NewLength", "CloseFile", "DeleteBlock", "NewTaragets", },
-              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.class,
-              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.Builder.class);
-          internal_static_CommitBlockSynchronizationResponseProto_descriptor =
-            getDescriptor().getMessageTypes().get(25);
-          internal_static_CommitBlockSynchronizationResponseProto_fieldAccessorTable = new
-            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_CommitBlockSynchronizationResponseProto_descriptor,
-              new java.lang.String[] { },
-              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.class,
-              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.Builder.class);
-          return null;
-        }
-      };
-    com.google.protobuf.Descriptors.FileDescriptor
-      .internalBuildGeneratedFileFrom(descriptorData,
-        new com.google.protobuf.Descriptors.FileDescriptor[] {
-          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
-        }, assigner);
-  }
-
-  // @@protoc_insertion_point(outer_class_scope)
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/HdfsProtos.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/HdfsProtos.java
deleted file mode 100644
index 258ce6faa64..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/HdfsProtos.java
+++ /dev/null
@@ -1,21240 +0,0 @@
-// Generated by the protocol buffer compiler. DO NOT EDIT!
-// source: hdfs.proto - -package org.apache.hadoop.hdfs.protocol.proto; - -public final class HdfsProtos { - private HdfsProtos() {} - public static void registerAllExtensions( - com.google.protobuf.ExtensionRegistry registry) { - } - public enum ReplicaStateProto - implements com.google.protobuf.ProtocolMessageEnum { - FINALIZED(0, 0), - RBW(1, 1), - RWR(2, 2), - RUR(3, 3), - TEMPORARY(4, 4), - ; - - public static final int FINALIZED_VALUE = 0; - public static final int RBW_VALUE = 1; - public static final int RWR_VALUE = 2; - public static final int RUR_VALUE = 3; - public static final int TEMPORARY_VALUE = 4; - - - public final int getNumber() { return value; } - - public static ReplicaStateProto valueOf(int value) { - switch (value) { - case 0: return FINALIZED; - case 1: return RBW; - case 2: return RWR; - case 3: return RUR; - case 4: return TEMPORARY; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public ReplicaStateProto findValueByNumber(int number) { - return ReplicaStateProto.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(0); - } - - private static final ReplicaStateProto[] VALUES = { - FINALIZED, RBW, RWR, RUR, TEMPORARY, - }; - - public static ReplicaStateProto valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private ReplicaStateProto(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:ReplicaStateProto) - } - - public interface ExtendedBlockProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string poolId = 1; - boolean hasPoolId(); - String getPoolId(); - - // required uint64 blockId = 2; - boolean hasBlockId(); - long getBlockId(); - - // required uint64 generationStamp = 3; - boolean hasGenerationStamp(); - long getGenerationStamp(); - - // optional uint64 numBytes = 4; - boolean hasNumBytes(); - long getNumBytes(); - } - public static final class ExtendedBlockProto extends - com.google.protobuf.GeneratedMessage - implements ExtendedBlockProtoOrBuilder { - // Use ExtendedBlockProto.newBuilder() to construct. 
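Per the ExtendedBlockProtoOrBuilder interface above, poolId, blockId and generationStamp are required proto2 fields while numBytes is optional, so the generated build() validates the first three and throws UninitializedMessageException if any is unset, whereas buildPartial() skips that check. A minimal round-trip sketch using only methods that appear in this file; the class name and field values are illustrative:

    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;

    public class ExtendedBlockSketch {            // hypothetical demo class
      public static void main(String[] args) throws Exception {
        ExtendedBlockProto blk = ExtendedBlockProto.newBuilder()
            .setPoolId("BP-0-127.0.0.1-0")        // illustrative block-pool id
            .setBlockId(42L)
            .setGenerationStamp(1001L)            // numBytes deliberately left unset
            .build();
        byte[] wire = blk.toByteArray();          // proto2 binary encoding
        ExtendedBlockProto back = ExtendedBlockProto.parseFrom(wire);
        assert back.getBlockId() == 42L && !back.hasNumBytes();
      }
    }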
- private ExtendedBlockProto(Builder builder) { - super(builder); - } - private ExtendedBlockProto(boolean noInit) {} - - private static final ExtendedBlockProto defaultInstance; - public static ExtendedBlockProto getDefaultInstance() { - return defaultInstance; - } - - public ExtendedBlockProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_ExtendedBlockProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_ExtendedBlockProto_fieldAccessorTable; - } - - private int bitField0_; - // required string poolId = 1; - public static final int POOLID_FIELD_NUMBER = 1; - private java.lang.Object poolId_; - public boolean hasPoolId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getPoolId() { - java.lang.Object ref = poolId_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - poolId_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getPoolIdBytes() { - java.lang.Object ref = poolId_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - poolId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required uint64 blockId = 2; - public static final int BLOCKID_FIELD_NUMBER = 2; - private long blockId_; - public boolean hasBlockId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getBlockId() { - return blockId_; - } - - // required uint64 generationStamp = 3; - public static final int GENERATIONSTAMP_FIELD_NUMBER = 3; - private long generationStamp_; - public boolean hasGenerationStamp() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getGenerationStamp() { - return generationStamp_; - } - - // optional uint64 numBytes = 4; - public static final int NUMBYTES_FIELD_NUMBER = 4; - private long numBytes_; - public boolean hasNumBytes() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public long getNumBytes() { - return numBytes_; - } - - private void initFields() { - poolId_ = ""; - blockId_ = 0L; - generationStamp_ = 0L; - numBytes_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasPoolId()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasBlockId()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasGenerationStamp()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getPoolIdBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, blockId_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt64(3, generationStamp_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeUInt64(4, numBytes_); - } - 
getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getPoolIdBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, blockId_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(3, generationStamp_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(4, numBytes_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto) obj; - - boolean result = true; - result = result && (hasPoolId() == other.hasPoolId()); - if (hasPoolId()) { - result = result && getPoolId() - .equals(other.getPoolId()); - } - result = result && (hasBlockId() == other.hasBlockId()); - if (hasBlockId()) { - result = result && (getBlockId() - == other.getBlockId()); - } - result = result && (hasGenerationStamp() == other.hasGenerationStamp()); - if (hasGenerationStamp()) { - result = result && (getGenerationStamp() - == other.getGenerationStamp()); - } - result = result && (hasNumBytes() == other.hasNumBytes()); - if (hasNumBytes()) { - result = result && (getNumBytes() - == other.getNumBytes()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasPoolId()) { - hash = (37 * hash) + POOLID_FIELD_NUMBER; - hash = (53 * hash) + getPoolId().hashCode(); - } - if (hasBlockId()) { - hash = (37 * hash) + BLOCKID_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getBlockId()); - } - if (hasGenerationStamp()) { - hash = (37 * hash) + GENERATIONSTAMP_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getGenerationStamp()); - } - if (hasNumBytes()) { - hash = (37 * hash) + NUMBYTES_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getNumBytes()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_ExtendedBlockProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_ExtendedBlockProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder() - private Builder() { - 
maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - poolId_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - blockId_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - generationStamp_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - numBytes_ = 0L; - bitField0_ = (bitField0_ & ~0x00000008); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.poolId_ = poolId_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.blockId_ = blockId_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.generationStamp_ = generationStamp_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.numBytes_ = numBytes_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) return this; - if (other.hasPoolId()) { - setPoolId(other.getPoolId()); - } - if (other.hasBlockId()) { - setBlockId(other.getBlockId()); - } - if (other.hasGenerationStamp()) { - setGenerationStamp(other.getGenerationStamp()); - } - if (other.hasNumBytes()) { - setNumBytes(other.getNumBytes()); - } - 
this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasPoolId()) { - - return false; - } - if (!hasBlockId()) { - - return false; - } - if (!hasGenerationStamp()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - poolId_ = input.readBytes(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - blockId_ = input.readUInt64(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - generationStamp_ = input.readUInt64(); - break; - } - case 32: { - bitField0_ |= 0x00000008; - numBytes_ = input.readUInt64(); - break; - } - } - } - } - - private int bitField0_; - - // required string poolId = 1; - private java.lang.Object poolId_ = ""; - public boolean hasPoolId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getPoolId() { - java.lang.Object ref = poolId_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - poolId_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setPoolId(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - poolId_ = value; - onChanged(); - return this; - } - public Builder clearPoolId() { - bitField0_ = (bitField0_ & ~0x00000001); - poolId_ = getDefaultInstance().getPoolId(); - onChanged(); - return this; - } - void setPoolId(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - poolId_ = value; - onChanged(); - } - - // required uint64 blockId = 2; - private long blockId_ ; - public boolean hasBlockId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getBlockId() { - return blockId_; - } - public Builder setBlockId(long value) { - bitField0_ |= 0x00000002; - blockId_ = value; - onChanged(); - return this; - } - public Builder clearBlockId() { - bitField0_ = (bitField0_ & ~0x00000002); - blockId_ = 0L; - onChanged(); - return this; - } - - // required uint64 generationStamp = 3; - private long generationStamp_ ; - public boolean hasGenerationStamp() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getGenerationStamp() { - return generationStamp_; - } - public Builder setGenerationStamp(long value) { - bitField0_ |= 0x00000004; - generationStamp_ = value; - onChanged(); - return this; - } - public Builder clearGenerationStamp() { - bitField0_ = (bitField0_ & ~0x00000004); - generationStamp_ = 0L; - onChanged(); - return this; - } - - // optional uint64 numBytes = 4; - private long numBytes_ ; - public boolean hasNumBytes() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public long getNumBytes() { - return numBytes_; - } - public Builder setNumBytes(long value) { - bitField0_ |= 0x00000008; - numBytes_ = value; - onChanged(); - return this; - } - public Builder clearNumBytes() { - bitField0_ = (bitField0_ & 
~0x00000008); - numBytes_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:ExtendedBlockProto) - } - - static { - defaultInstance = new ExtendedBlockProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ExtendedBlockProto) - } - - public interface BlockTokenIdentifierProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required bytes identifier = 1; - boolean hasIdentifier(); - com.google.protobuf.ByteString getIdentifier(); - - // required bytes password = 2; - boolean hasPassword(); - com.google.protobuf.ByteString getPassword(); - - // required string kind = 3; - boolean hasKind(); - String getKind(); - - // required string service = 4; - boolean hasService(); - String getService(); - } - public static final class BlockTokenIdentifierProto extends - com.google.protobuf.GeneratedMessage - implements BlockTokenIdentifierProtoOrBuilder { - // Use BlockTokenIdentifierProto.newBuilder() to construct. - private BlockTokenIdentifierProto(Builder builder) { - super(builder); - } - private BlockTokenIdentifierProto(boolean noInit) {} - - private static final BlockTokenIdentifierProto defaultInstance; - public static BlockTokenIdentifierProto getDefaultInstance() { - return defaultInstance; - } - - public BlockTokenIdentifierProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockTokenIdentifierProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockTokenIdentifierProto_fieldAccessorTable; - } - - private int bitField0_; - // required bytes identifier = 1; - public static final int IDENTIFIER_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString identifier_; - public boolean hasIdentifier() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public com.google.protobuf.ByteString getIdentifier() { - return identifier_; - } - - // required bytes password = 2; - public static final int PASSWORD_FIELD_NUMBER = 2; - private com.google.protobuf.ByteString password_; - public boolean hasPassword() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public com.google.protobuf.ByteString getPassword() { - return password_; - } - - // required string kind = 3; - public static final int KIND_FIELD_NUMBER = 3; - private java.lang.Object kind_; - public boolean hasKind() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public String getKind() { - java.lang.Object ref = kind_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - kind_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getKindBytes() { - java.lang.Object ref = kind_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - kind_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string service = 4; - public static final int SERVICE_FIELD_NUMBER = 4; - private java.lang.Object service_; - public boolean hasService() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public String 
getService() { - java.lang.Object ref = service_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - service_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getServiceBytes() { - java.lang.Object ref = service_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - service_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - identifier_ = com.google.protobuf.ByteString.EMPTY; - password_ = com.google.protobuf.ByteString.EMPTY; - kind_ = ""; - service_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasIdentifier()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasPassword()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasKind()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasService()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, identifier_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, password_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, getKindBytes()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeBytes(4, getServiceBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, identifier_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, password_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getKindBytes()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(4, getServiceBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto) obj; - - boolean result = true; - result = result && (hasIdentifier() == other.hasIdentifier()); - if (hasIdentifier()) { - result = result && getIdentifier() - .equals(other.getIdentifier()); - } - result = result && (hasPassword() == 
other.hasPassword()); - if (hasPassword()) { - result = result && getPassword() - .equals(other.getPassword()); - } - result = result && (hasKind() == other.hasKind()); - if (hasKind()) { - result = result && getKind() - .equals(other.getKind()); - } - result = result && (hasService() == other.hasService()); - if (hasService()) { - result = result && getService() - .equals(other.getService()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasIdentifier()) { - hash = (37 * hash) + IDENTIFIER_FIELD_NUMBER; - hash = (53 * hash) + getIdentifier().hashCode(); - } - if (hasPassword()) { - hash = (37 * hash) + PASSWORD_FIELD_NUMBER; - hash = (53 * hash) + getPassword().hashCode(); - } - if (hasKind()) { - hash = (37 * hash) + KIND_FIELD_NUMBER; - hash = (53 * hash) + getKind().hashCode(); - } - if (hasService()) { - hash = (37 * hash) + SERVICE_FIELD_NUMBER; - hash = (53 * hash) + getService().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return 
null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockTokenIdentifierProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockTokenIdentifierProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - identifier_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - password_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000002); - kind_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - service_ = ""; - bitField0_ = (bitField0_ & ~0x00000008); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.identifier_ = identifier_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.password_ = password_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.kind_ = kind_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.service_ = service_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance()) return this; - if (other.hasIdentifier()) { - setIdentifier(other.getIdentifier()); - } - if (other.hasPassword()) { - setPassword(other.getPassword()); - } - if (other.hasKind()) { - setKind(other.getKind()); - } - if (other.hasService()) { - setService(other.getService()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasIdentifier()) { - - return false; - } - if (!hasPassword()) { - - return false; - } - if (!hasKind()) { - - return false; - } - if (!hasService()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - identifier_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - password_ = input.readBytes(); - break; - } - case 26: { - bitField0_ |= 0x00000004; - kind_ = input.readBytes(); - break; - } - case 34: { - bitField0_ |= 0x00000008; - service_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required bytes identifier = 1; - private com.google.protobuf.ByteString identifier_ = com.google.protobuf.ByteString.EMPTY; - public boolean hasIdentifier() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public 
com.google.protobuf.ByteString getIdentifier() { - return identifier_; - } - public Builder setIdentifier(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - identifier_ = value; - onChanged(); - return this; - } - public Builder clearIdentifier() { - bitField0_ = (bitField0_ & ~0x00000001); - identifier_ = getDefaultInstance().getIdentifier(); - onChanged(); - return this; - } - - // required bytes password = 2; - private com.google.protobuf.ByteString password_ = com.google.protobuf.ByteString.EMPTY; - public boolean hasPassword() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public com.google.protobuf.ByteString getPassword() { - return password_; - } - public Builder setPassword(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - password_ = value; - onChanged(); - return this; - } - public Builder clearPassword() { - bitField0_ = (bitField0_ & ~0x00000002); - password_ = getDefaultInstance().getPassword(); - onChanged(); - return this; - } - - // required string kind = 3; - private java.lang.Object kind_ = ""; - public boolean hasKind() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public String getKind() { - java.lang.Object ref = kind_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - kind_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setKind(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - kind_ = value; - onChanged(); - return this; - } - public Builder clearKind() { - bitField0_ = (bitField0_ & ~0x00000004); - kind_ = getDefaultInstance().getKind(); - onChanged(); - return this; - } - void setKind(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000004; - kind_ = value; - onChanged(); - } - - // required string service = 4; - private java.lang.Object service_ = ""; - public boolean hasService() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public String getService() { - java.lang.Object ref = service_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - service_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setService(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000008; - service_ = value; - onChanged(); - return this; - } - public Builder clearService() { - bitField0_ = (bitField0_ & ~0x00000008); - service_ = getDefaultInstance().getService(); - onChanged(); - return this; - } - void setService(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000008; - service_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:BlockTokenIdentifierProto) - } - - static { - defaultInstance = new BlockTokenIdentifierProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:BlockTokenIdentifierProto) - } - - public interface DatanodeIDProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string name = 1; - boolean hasName(); - String getName(); - - // required string storageID = 2; - boolean hasStorageID(); - String getStorageID(); - - // required uint32 infoPort = 3; - boolean hasInfoPort(); - int getInfoPort(); - - // required uint32 ipcPort = 4; - boolean hasIpcPort(); - int getIpcPort(); - } - public static final 
class DatanodeIDProto extends - com.google.protobuf.GeneratedMessage - implements DatanodeIDProtoOrBuilder { - // Use DatanodeIDProto.newBuilder() to construct. - private DatanodeIDProto(Builder builder) { - super(builder); - } - private DatanodeIDProto(boolean noInit) {} - - private static final DatanodeIDProto defaultInstance; - public static DatanodeIDProto getDefaultInstance() { - return defaultInstance; - } - - public DatanodeIDProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeIDProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeIDProto_fieldAccessorTable; - } - - private int bitField0_; - // required string name = 1; - public static final int NAME_FIELD_NUMBER = 1; - private java.lang.Object name_; - public boolean hasName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getName() { - java.lang.Object ref = name_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - name_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getNameBytes() { - java.lang.Object ref = name_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - name_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string storageID = 2; - public static final int STORAGEID_FIELD_NUMBER = 2; - private java.lang.Object storageID_; - public boolean hasStorageID() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getStorageID() { - java.lang.Object ref = storageID_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - storageID_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getStorageIDBytes() { - java.lang.Object ref = storageID_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - storageID_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required uint32 infoPort = 3; - public static final int INFOPORT_FIELD_NUMBER = 3; - private int infoPort_; - public boolean hasInfoPort() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public int getInfoPort() { - return infoPort_; - } - - // required uint32 ipcPort = 4; - public static final int IPCPORT_FIELD_NUMBER = 4; - private int ipcPort_; - public boolean hasIpcPort() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public int getIpcPort() { - return ipcPort_; - } - - private void initFields() { - name_ = ""; - storageID_ = ""; - infoPort_ = 0; - ipcPort_ = 0; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasName()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasStorageID()) 
{ - memoizedIsInitialized = 0; - return false; - } - if (!hasInfoPort()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasIpcPort()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getStorageIDBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt32(3, infoPort_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeUInt32(4, ipcPort_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getStorageIDBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(3, infoPort_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(4, ipcPort_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto) obj; - - boolean result = true; - result = result && (hasName() == other.hasName()); - if (hasName()) { - result = result && getName() - .equals(other.getName()); - } - result = result && (hasStorageID() == other.hasStorageID()); - if (hasStorageID()) { - result = result && getStorageID() - .equals(other.getStorageID()); - } - result = result && (hasInfoPort() == other.hasInfoPort()); - if (hasInfoPort()) { - result = result && (getInfoPort() - == other.getInfoPort()); - } - result = result && (hasIpcPort() == other.hasIpcPort()); - if (hasIpcPort()) { - result = result && (getIpcPort() - == other.getIpcPort()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasName()) { - hash = (37 * hash) + NAME_FIELD_NUMBER; - hash = (53 * hash) + getName().hashCode(); - } - if (hasStorageID()) { - hash = (37 * hash) + STORAGEID_FIELD_NUMBER; - hash = (53 * hash) + getStorageID().hashCode(); - } - if (hasInfoPort()) { - hash = (37 * hash) + INFOPORT_FIELD_NUMBER; - hash = (53 * hash) + getInfoPort(); - } - if (hasIpcPort()) { - hash = (37 * hash) + IPCPORT_FIELD_NUMBER; - hash = (53 * hash) + getIpcPort(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - 
return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeIDProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeIDProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - name_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - storageID_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - infoPort_ = 0; - bitField0_ = (bitField0_ & ~0x00000004); - ipcPort_ = 0; - bitField0_ = (bitField0_ & ~0x00000008); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.name_ = name_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.storageID_ = storageID_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.infoPort_ = infoPort_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.ipcPort_ = ipcPort_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto)other); - } else { - super.mergeFrom(other); - return this; - } - 
} - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()) return this; - if (other.hasName()) { - setName(other.getName()); - } - if (other.hasStorageID()) { - setStorageID(other.getStorageID()); - } - if (other.hasInfoPort()) { - setInfoPort(other.getInfoPort()); - } - if (other.hasIpcPort()) { - setIpcPort(other.getIpcPort()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasName()) { - - return false; - } - if (!hasStorageID()) { - - return false; - } - if (!hasInfoPort()) { - - return false; - } - if (!hasIpcPort()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - name_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - storageID_ = input.readBytes(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - infoPort_ = input.readUInt32(); - break; - } - case 32: { - bitField0_ |= 0x00000008; - ipcPort_ = input.readUInt32(); - break; - } - } - } - } - - private int bitField0_; - - // required string name = 1; - private java.lang.Object name_ = ""; - public boolean hasName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getName() { - java.lang.Object ref = name_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - name_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setName(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - name_ = value; - onChanged(); - return this; - } - public Builder clearName() { - bitField0_ = (bitField0_ & ~0x00000001); - name_ = getDefaultInstance().getName(); - onChanged(); - return this; - } - void setName(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - name_ = value; - onChanged(); - } - - // required string storageID = 2; - private java.lang.Object storageID_ = ""; - public boolean hasStorageID() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getStorageID() { - java.lang.Object ref = storageID_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - storageID_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setStorageID(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - storageID_ = value; - onChanged(); - return this; - } - public Builder clearStorageID() { - bitField0_ = (bitField0_ & ~0x00000002); - storageID_ = getDefaultInstance().getStorageID(); - onChanged(); - return this; - } - void setStorageID(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000002; - storageID_ = value; - 
onChanged(); - } - - // required uint32 infoPort = 3; - private int infoPort_ ; - public boolean hasInfoPort() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public int getInfoPort() { - return infoPort_; - } - public Builder setInfoPort(int value) { - bitField0_ |= 0x00000004; - infoPort_ = value; - onChanged(); - return this; - } - public Builder clearInfoPort() { - bitField0_ = (bitField0_ & ~0x00000004); - infoPort_ = 0; - onChanged(); - return this; - } - - // required uint32 ipcPort = 4; - private int ipcPort_ ; - public boolean hasIpcPort() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public int getIpcPort() { - return ipcPort_; - } - public Builder setIpcPort(int value) { - bitField0_ |= 0x00000008; - ipcPort_ = value; - onChanged(); - return this; - } - public Builder clearIpcPort() { - bitField0_ = (bitField0_ & ~0x00000008); - ipcPort_ = 0; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:DatanodeIDProto) - } - - static { - defaultInstance = new DatanodeIDProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:DatanodeIDProto) - } - - public interface DatanodeInfosProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // repeated .DatanodeInfoProto datanodes = 1; - java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> - getDatanodesList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDatanodes(int index); - int getDatanodesCount(); - java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> - getDatanodesOrBuilderList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDatanodesOrBuilder( - int index); - } - public static final class DatanodeInfosProto extends - com.google.protobuf.GeneratedMessage - implements DatanodeInfosProtoOrBuilder { - // Use DatanodeInfosProto.newBuilder() to construct.
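The DatanodeIDProto message deleted above follows the stock protoc builder pattern: all four fields are declared required, so isInitialized() only reports true once name, storageID, infoPort and ipcPort are all set, and build() throws otherwise. A minimal usage sketch of that generated API (the identifier and port values are placeholders, not values taken from this patch):

    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;

    // Every setter below is mandatory: skipping one leaves isInitialized()
    // false and makes build() throw an UninitializedMessageException.
    HdfsProtos.DatanodeIDProto id = HdfsProtos.DatanodeIDProto.newBuilder()
        .setName("dn1.example.com:50010")   // placeholder "host:port" name
        .setStorageID("DS-1234")            // placeholder storage ID
        .setInfoPort(50075)
        .setIpcPort(50020)
        .build();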
- private DatanodeInfosProto(Builder builder) { - super(builder); - } - private DatanodeInfosProto(boolean noInit) {} - - private static final DatanodeInfosProto defaultInstance; - public static DatanodeInfosProto getDefaultInstance() { - return defaultInstance; - } - - public DatanodeInfosProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeInfosProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeInfosProto_fieldAccessorTable; - } - - // repeated .DatanodeInfoProto datanodes = 1; - public static final int DATANODES_FIELD_NUMBER = 1; - private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> datanodes_; - public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getDatanodesList() { - return datanodes_; - } - public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> - getDatanodesOrBuilderList() { - return datanodes_; - } - public int getDatanodesCount() { - return datanodes_.size(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDatanodes(int index) { - return datanodes_.get(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDatanodesOrBuilder( - int index) { - return datanodes_.get(index); - } - - private void initFields() { - datanodes_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - for (int i = 0; i < getDatanodesCount(); i++) { - if (!getDatanodes(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < datanodes_.size(); i++) { - output.writeMessage(1, datanodes_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - for (int i = 0; i < datanodes_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, datanodes_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto) obj; - - boolean result = true; - result = result && getDatanodesList() - .equals(other.getDatanodesList()); - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getDatanodesCount() > 0) { - hash = (37 * hash) +
DATANODES_FIELD_NUMBER; - hash = (53 * hash) + getDatanodesList().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - 
return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeInfosProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeInfosProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getDatanodesFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (datanodesBuilder_ == null) { - datanodes_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - } else { - datanodesBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto(this); - int from_bitField0_ = bitField0_; - if (datanodesBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - datanodes_ = java.util.Collections.unmodifiableList(datanodes_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.datanodes_ = datanodes_; - } else { - result.datanodes_ = datanodesBuilder_.build(); - } - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto other) { - if
(other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance()) return this; - if (datanodesBuilder_ == null) { - if (!other.datanodes_.isEmpty()) { - if (datanodes_.isEmpty()) { - datanodes_ = other.datanodes_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureDatanodesIsMutable(); - datanodes_.addAll(other.datanodes_); - } - onChanged(); - } - } else { - if (!other.datanodes_.isEmpty()) { - if (datanodesBuilder_.isEmpty()) { - datanodesBuilder_.dispose(); - datanodesBuilder_ = null; - datanodes_ = other.datanodes_; - bitField0_ = (bitField0_ & ~0x00000001); - datanodesBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getDatanodesFieldBuilder() : null; - } else { - datanodesBuilder_.addAllMessages(other.datanodes_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - for (int i = 0; i < getDatanodesCount(); i++) { - if (!getDatanodes(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addDatanodes(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // repeated .DatanodeInfoProto datanodes = 1; - private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> datanodes_ = - java.util.Collections.emptyList(); - private void ensureDatanodesIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - datanodes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>(datanodes_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> datanodesBuilder_; - - public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getDatanodesList() { - if (datanodesBuilder_ == null) { - return java.util.Collections.unmodifiableList(datanodes_); - } else { - return datanodesBuilder_.getMessageList(); - } - } - public int getDatanodesCount() { - if (datanodesBuilder_ == null) { - return datanodes_.size(); - } else { - return datanodesBuilder_.getCount(); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDatanodes(int index) { - if (datanodesBuilder_ == null) { - return datanodes_.get(index); - } else { - return datanodesBuilder_.getMessage(index); - } - } - public Builder setDatanodes( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (datanodesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureDatanodesIsMutable(); - datanodes_.set(index, value); - onChanged(); - } else { -
datanodesBuilder_.setMessage(index, value); - } - return this; - } - public Builder setDatanodes( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { - if (datanodesBuilder_ == null) { - ensureDatanodesIsMutable(); - datanodes_.set(index, builderForValue.build()); - onChanged(); - } else { - datanodesBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - public Builder addDatanodes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (datanodesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureDatanodesIsMutable(); - datanodes_.add(value); - onChanged(); - } else { - datanodesBuilder_.addMessage(value); - } - return this; - } - public Builder addDatanodes( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (datanodesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureDatanodesIsMutable(); - datanodes_.add(index, value); - onChanged(); - } else { - datanodesBuilder_.addMessage(index, value); - } - return this; - } - public Builder addDatanodes( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { - if (datanodesBuilder_ == null) { - ensureDatanodesIsMutable(); - datanodes_.add(builderForValue.build()); - onChanged(); - } else { - datanodesBuilder_.addMessage(builderForValue.build()); - } - return this; - } - public Builder addDatanodes( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { - if (datanodesBuilder_ == null) { - ensureDatanodesIsMutable(); - datanodes_.add(index, builderForValue.build()); - onChanged(); - } else { - datanodesBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - public Builder addAllDatanodes( - java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> values) { - if (datanodesBuilder_ == null) { - ensureDatanodesIsMutable(); - super.addAll(values, datanodes_); - onChanged(); - } else { - datanodesBuilder_.addAllMessages(values); - } - return this; - } - public Builder clearDatanodes() { - if (datanodesBuilder_ == null) { - datanodes_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - datanodesBuilder_.clear(); - } - return this; - } - public Builder removeDatanodes(int index) { - if (datanodesBuilder_ == null) { - ensureDatanodesIsMutable(); - datanodes_.remove(index); - onChanged(); - } else { - datanodesBuilder_.remove(index); - } - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getDatanodesBuilder( - int index) { - return getDatanodesFieldBuilder().getBuilder(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDatanodesOrBuilder( - int index) { - if (datanodesBuilder_ == null) { - return datanodes_.get(index); } else { - return datanodesBuilder_.getMessageOrBuilder(index); - } - } - public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> - getDatanodesOrBuilderList() { - if (datanodesBuilder_ != null) { - return datanodesBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(datanodes_); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addDatanodesBuilder() { - return getDatanodesFieldBuilder().addBuilder( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); - } - public
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addDatanodesBuilder( - int index) { - return getDatanodesFieldBuilder().addBuilder( - index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); - } - public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder> - getDatanodesBuilderList() { - return getDatanodesFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> - getDatanodesFieldBuilder() { - if (datanodesBuilder_ == null) { - datanodesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>( - datanodes_, - ((bitField0_ & 0x00000001) == 0x00000001), - getParentForChildren(), - isClean()); - datanodes_ = null; - } - return datanodesBuilder_; - } - - // @@protoc_insertion_point(builder_scope:DatanodeInfosProto) - } - - static { - defaultInstance = new DatanodeInfosProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:DatanodeInfosProto) - } - - public interface DatanodeInfoProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .DatanodeIDProto id = 1; - boolean hasId(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder(); - - // optional uint64 capacity = 2; - boolean hasCapacity(); - long getCapacity(); - - // optional uint64 dfsUsed = 3; - boolean hasDfsUsed(); - long getDfsUsed(); - - // optional uint64 remaining = 4; - boolean hasRemaining(); - long getRemaining(); - - // optional uint64 blockPoolUsed = 5; - boolean hasBlockPoolUsed(); - long getBlockPoolUsed(); - - // optional uint64 lastUpdate = 6; - boolean hasLastUpdate(); - long getLastUpdate(); - - // optional uint32 xceiverCount = 7; - boolean hasXceiverCount(); - int getXceiverCount(); - - // optional string location = 8; - boolean hasLocation(); - String getLocation(); - - // optional string hostName = 9; - boolean hasHostName(); - String getHostName(); - - // optional .DatanodeInfoProto.AdminState adminState = 10; - boolean hasAdminState(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState(); - } - public static final class DatanodeInfoProto extends - com.google.protobuf.GeneratedMessage - implements DatanodeInfoProtoOrBuilder { - // Use DatanodeInfoProto.newBuilder() to construct.
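DatanodeInfosProto above is just a wrapper around one repeated DatanodeInfoProto field, while DatanodeInfoProto itself (declared next) nests the required DatanodeIDProto id plus optional usage counters. A sketch of composing the two, reusing the placeholder id built in the earlier sketch (the capacity figure is equally arbitrary):

    // Only id is required; optional fields that are never set report
    // has*() == false and return their zero/empty defaults.
    HdfsProtos.DatanodeInfoProto dn = HdfsProtos.DatanodeInfoProto.newBuilder()
        .setId(id)
        .setCapacity(1024L * 1024 * 1024)   // optional uint64 capacity = 2
        .setAdminState(HdfsProtos.DatanodeInfoProto.AdminState.NORMAL)
        .build();

    // Repeated field accessors: addDatanodes()/addAllDatanodes() accumulate
    // entries, getDatanodesCount()/getDatanodes(i) read them back in order.
    HdfsProtos.DatanodeInfosProto dns = HdfsProtos.DatanodeInfosProto.newBuilder()
        .addDatanodes(dn)
        .build();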
- private DatanodeInfoProto(Builder builder) { - super(builder); - } - private DatanodeInfoProto(boolean noInit) {} - - private static final DatanodeInfoProto defaultInstance; - public static DatanodeInfoProto getDefaultInstance() { - return defaultInstance; - } - - public DatanodeInfoProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeInfoProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeInfoProto_fieldAccessorTable; - } - - public enum AdminState - implements com.google.protobuf.ProtocolMessageEnum { - NORMAL(0, 0), - DECOMMISSION_INPROGRESS(1, 1), - DECOMMISSIONED(2, 2), - ; - - public static final int NORMAL_VALUE = 0; - public static final int DECOMMISSION_INPROGRESS_VALUE = 1; - public static final int DECOMMISSIONED_VALUE = 2; - - - public final int getNumber() { return value; } - - public static AdminState valueOf(int value) { - switch (value) { - case 0: return NORMAL; - case 1: return DECOMMISSION_INPROGRESS; - case 2: return DECOMMISSIONED; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap<AdminState> - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap<AdminState> - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap<AdminState>() { - public AdminState findValueByNumber(int number) { - return AdminState.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDescriptor().getEnumTypes().get(0); - } - - private static final AdminState[] VALUES = { - NORMAL, DECOMMISSION_INPROGRESS, DECOMMISSIONED, - }; - - public static AdminState valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private AdminState(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:DatanodeInfoProto.AdminState) - } - - private int bitField0_; - // required .DatanodeIDProto id = 1; - public static final int ID_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto id_; - public boolean hasId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId() { - return id_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder() { - return id_; - } - - // optional uint64 capacity = 2; - public static final int CAPACITY_FIELD_NUMBER = 2; - private long capacity_; - public boolean hasCapacity() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getCapacity() { - return capacity_; - } - - // optional uint64
dfsUsed = 3; - public static final int DFSUSED_FIELD_NUMBER = 3; - private long dfsUsed_; - public boolean hasDfsUsed() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getDfsUsed() { - return dfsUsed_; - } - - // optional uint64 remaining = 4; - public static final int REMAINING_FIELD_NUMBER = 4; - private long remaining_; - public boolean hasRemaining() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public long getRemaining() { - return remaining_; - } - - // optional uint64 blockPoolUsed = 5; - public static final int BLOCKPOOLUSED_FIELD_NUMBER = 5; - private long blockPoolUsed_; - public boolean hasBlockPoolUsed() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - public long getBlockPoolUsed() { - return blockPoolUsed_; - } - - // optional uint64 lastUpdate = 6; - public static final int LASTUPDATE_FIELD_NUMBER = 6; - private long lastUpdate_; - public boolean hasLastUpdate() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - public long getLastUpdate() { - return lastUpdate_; - } - - // optional uint32 xceiverCount = 7; - public static final int XCEIVERCOUNT_FIELD_NUMBER = 7; - private int xceiverCount_; - public boolean hasXceiverCount() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - public int getXceiverCount() { - return xceiverCount_; - } - - // optional string location = 8; - public static final int LOCATION_FIELD_NUMBER = 8; - private java.lang.Object location_; - public boolean hasLocation() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - public String getLocation() { - java.lang.Object ref = location_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - location_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getLocationBytes() { - java.lang.Object ref = location_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - location_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional string hostName = 9; - public static final int HOSTNAME_FIELD_NUMBER = 9; - private java.lang.Object hostName_; - public boolean hasHostName() { - return ((bitField0_ & 0x00000100) == 0x00000100); - } - public String getHostName() { - java.lang.Object ref = hostName_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - hostName_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getHostNameBytes() { - java.lang.Object ref = hostName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - hostName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional .DatanodeInfoProto.AdminState adminState = 10; - public static final int ADMINSTATE_FIELD_NUMBER = 10; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState adminState_; - public boolean hasAdminState() { - return ((bitField0_ & 0x00000200) == 0x00000200); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState() { - return adminState_; - } - - private void 
initFields() { - id_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance(); - capacity_ = 0L; - dfsUsed_ = 0L; - remaining_ = 0L; - blockPoolUsed_ = 0L; - lastUpdate_ = 0L; - xceiverCount_ = 0; - location_ = ""; - hostName_ = ""; - adminState_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasId()) { - memoizedIsInitialized = 0; - return false; - } - if (!getId().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, id_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, capacity_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt64(3, dfsUsed_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeUInt64(4, remaining_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeUInt64(5, blockPoolUsed_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeUInt64(6, lastUpdate_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - output.writeUInt32(7, xceiverCount_); - } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - output.writeBytes(8, getLocationBytes()); - } - if (((bitField0_ & 0x00000100) == 0x00000100)) { - output.writeBytes(9, getHostNameBytes()); - } - if (((bitField0_ & 0x00000200) == 0x00000200)) { - output.writeEnum(10, adminState_.getNumber()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, id_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, capacity_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(3, dfsUsed_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(4, remaining_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(5, blockPoolUsed_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(6, lastUpdate_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(7, xceiverCount_); - } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(8, getLocationBytes()); - } - if (((bitField0_ & 0x00000100) == 0x00000100)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(9, getHostNameBytes()); - } - if (((bitField0_ & 0x00000200) == 0x00000200)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(10, adminState_.getNumber()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } 
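writeTo() and getSerializedSize() above implement the wire encoding for DatanodeInfoProto (only fields whose bit is set in bitField0_ are emitted), and the parseFrom() overloads further down invert it. A round-trip sketch, assuming the dn message built in the previous sketch:

    byte[] wire = dn.toByteArray();   // inherited helper that drives writeTo()
    HdfsProtos.DatanodeInfoProto back =
        HdfsProtos.DatanodeInfoProto.parseFrom(wire);
    // equals() (defined just below) compares field by field, so the
    // round trip is lossless and the size bookkeeping agrees.
    assert back.equals(dn) && wire.length == dn.getSerializedSize();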
- - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto) obj; - - boolean result = true; - result = result && (hasId() == other.hasId()); - if (hasId()) { - result = result && getId() - .equals(other.getId()); - } - result = result && (hasCapacity() == other.hasCapacity()); - if (hasCapacity()) { - result = result && (getCapacity() - == other.getCapacity()); - } - result = result && (hasDfsUsed() == other.hasDfsUsed()); - if (hasDfsUsed()) { - result = result && (getDfsUsed() - == other.getDfsUsed()); - } - result = result && (hasRemaining() == other.hasRemaining()); - if (hasRemaining()) { - result = result && (getRemaining() - == other.getRemaining()); - } - result = result && (hasBlockPoolUsed() == other.hasBlockPoolUsed()); - if (hasBlockPoolUsed()) { - result = result && (getBlockPoolUsed() - == other.getBlockPoolUsed()); - } - result = result && (hasLastUpdate() == other.hasLastUpdate()); - if (hasLastUpdate()) { - result = result && (getLastUpdate() - == other.getLastUpdate()); - } - result = result && (hasXceiverCount() == other.hasXceiverCount()); - if (hasXceiverCount()) { - result = result && (getXceiverCount() - == other.getXceiverCount()); - } - result = result && (hasLocation() == other.hasLocation()); - if (hasLocation()) { - result = result && getLocation() - .equals(other.getLocation()); - } - result = result && (hasHostName() == other.hasHostName()); - if (hasHostName()) { - result = result && getHostName() - .equals(other.getHostName()); - } - result = result && (hasAdminState() == other.hasAdminState()); - if (hasAdminState()) { - result = result && - (getAdminState() == other.getAdminState()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasId()) { - hash = (37 * hash) + ID_FIELD_NUMBER; - hash = (53 * hash) + getId().hashCode(); - } - if (hasCapacity()) { - hash = (37 * hash) + CAPACITY_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getCapacity()); - } - if (hasDfsUsed()) { - hash = (37 * hash) + DFSUSED_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getDfsUsed()); - } - if (hasRemaining()) { - hash = (37 * hash) + REMAINING_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getRemaining()); - } - if (hasBlockPoolUsed()) { - hash = (37 * hash) + BLOCKPOOLUSED_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getBlockPoolUsed()); - } - if (hasLastUpdate()) { - hash = (37 * hash) + LASTUPDATE_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getLastUpdate()); - } - if (hasXceiverCount()) { - hash = (37 * hash) + XCEIVERCOUNT_FIELD_NUMBER; - hash = (53 * hash) + getXceiverCount(); - } - if (hasLocation()) { - hash = (37 * hash) + LOCATION_FIELD_NUMBER; - hash = (53 * hash) + getLocation().hashCode(); - } - if (hasHostName()) { - hash = (37 * hash) + HOSTNAME_FIELD_NUMBER; - hash = (53 * hash) + getHostName().hashCode(); - } - if (hasAdminState()) { - hash = (37 * hash) + 
ADMINSTATE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getAdminState()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return 
builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeInfoProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeInfoProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getIdFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (idBuilder_ == null) { - id_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance(); - } else { - idBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - capacity_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - dfsUsed_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - remaining_ = 0L; - bitField0_ = (bitField0_ & ~0x00000008); - blockPoolUsed_ = 0L; - bitField0_ = (bitField0_ & ~0x00000010); - lastUpdate_ = 0L; - bitField0_ = (bitField0_ & ~0x00000020); - xceiverCount_ = 0; - bitField0_ = (bitField0_ & ~0x00000040); - location_ = ""; - bitField0_ = (bitField0_ & ~0x00000080); - hostName_ = ""; - bitField0_ = (bitField0_ & ~0x00000100); - adminState_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL; - bitField0_ = (bitField0_ & ~0x00000200); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; 
- } - if (idBuilder_ == null) { - result.id_ = id_; - } else { - result.id_ = idBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.capacity_ = capacity_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.dfsUsed_ = dfsUsed_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.remaining_ = remaining_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.blockPoolUsed_ = blockPoolUsed_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; - } - result.lastUpdate_ = lastUpdate_; - if (((from_bitField0_ & 0x00000040) == 0x00000040)) { - to_bitField0_ |= 0x00000040; - } - result.xceiverCount_ = xceiverCount_; - if (((from_bitField0_ & 0x00000080) == 0x00000080)) { - to_bitField0_ |= 0x00000080; - } - result.location_ = location_; - if (((from_bitField0_ & 0x00000100) == 0x00000100)) { - to_bitField0_ |= 0x00000100; - } - result.hostName_ = hostName_; - if (((from_bitField0_ & 0x00000200) == 0x00000200)) { - to_bitField0_ |= 0x00000200; - } - result.adminState_ = adminState_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()) return this; - if (other.hasId()) { - mergeId(other.getId()); - } - if (other.hasCapacity()) { - setCapacity(other.getCapacity()); - } - if (other.hasDfsUsed()) { - setDfsUsed(other.getDfsUsed()); - } - if (other.hasRemaining()) { - setRemaining(other.getRemaining()); - } - if (other.hasBlockPoolUsed()) { - setBlockPoolUsed(other.getBlockPoolUsed()); - } - if (other.hasLastUpdate()) { - setLastUpdate(other.getLastUpdate()); - } - if (other.hasXceiverCount()) { - setXceiverCount(other.getXceiverCount()); - } - if (other.hasLocation()) { - setLocation(other.getLocation()); - } - if (other.hasHostName()) { - setHostName(other.getHostName()); - } - if (other.hasAdminState()) { - setAdminState(other.getAdminState()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasId()) { - - return false; - } - if (!getId().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder subBuilder = 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder(); - if (hasId()) { - subBuilder.mergeFrom(getId()); - } - input.readMessage(subBuilder, extensionRegistry); - setId(subBuilder.buildPartial()); - break; - } - case 16: { - bitField0_ |= 0x00000002; - capacity_ = input.readUInt64(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - dfsUsed_ = input.readUInt64(); - break; - } - case 32: { - bitField0_ |= 0x00000008; - remaining_ = input.readUInt64(); - break; - } - case 40: { - bitField0_ |= 0x00000010; - blockPoolUsed_ = input.readUInt64(); - break; - } - case 48: { - bitField0_ |= 0x00000020; - lastUpdate_ = input.readUInt64(); - break; - } - case 56: { - bitField0_ |= 0x00000040; - xceiverCount_ = input.readUInt32(); - break; - } - case 66: { - bitField0_ |= 0x00000080; - location_ = input.readBytes(); - break; - } - case 74: { - bitField0_ |= 0x00000100; - hostName_ = input.readBytes(); - break; - } - case 80: { - int rawValue = input.readEnum(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(10, rawValue); - } else { - bitField0_ |= 0x00000200; - adminState_ = value; - } - break; - } - } - } - } - - private int bitField0_; - - // required .DatanodeIDProto id = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto id_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> idBuilder_; - public boolean hasId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId() { - if (idBuilder_ == null) { - return id_; - } else { - return idBuilder_.getMessage(); - } - } - public Builder setId(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { - if (idBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - id_ = value; - onChanged(); - } else { - idBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setId( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) { - if (idBuilder_ == null) { - id_ = builderForValue.build(); - onChanged(); - } else { - idBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeId(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { - if (idBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - id_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()) { - id_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder(id_).mergeFrom(value).buildPartial(); - } else { - id_ = value; - } - onChanged(); - } else { - idBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearId() { - if (idBuilder_ == null) { - id_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance(); - onChanged(); - } else { - idBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getIdBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getIdFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder() { - if (idBuilder_ != null) { - return idBuilder_.getMessageOrBuilder(); - } else { - return id_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> - getIdFieldBuilder() { - if (idBuilder_ == null) { - idBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>( - id_, - getParentForChildren(), - isClean()); - id_ = null; - } - return idBuilder_; - } - - // optional uint64 capacity = 2; - private long capacity_ ; - public boolean hasCapacity() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getCapacity() { - return capacity_; - } - public Builder setCapacity(long value) { - bitField0_ |= 0x00000002; - capacity_ = value; - onChanged(); - return this; - } - public Builder clearCapacity() { - bitField0_ = (bitField0_ & ~0x00000002); - capacity_ = 0L; - onChanged(); - return this; - } - - // optional uint64 dfsUsed = 3; - private long dfsUsed_ ; - public boolean hasDfsUsed() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getDfsUsed() { - return dfsUsed_; - } - public Builder setDfsUsed(long value) { - bitField0_ |= 0x00000004; - dfsUsed_ = value; - onChanged(); - return this; - } - public Builder clearDfsUsed() { - bitField0_ = (bitField0_ & ~0x00000004); - dfsUsed_ = 0L; - onChanged(); - return this; - } - - // optional uint64 remaining = 4; - private long remaining_ ; - public boolean hasRemaining() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public long getRemaining() { - return remaining_; - } - public Builder setRemaining(long value) { - bitField0_ |= 0x00000008; - remaining_ = value; - onChanged(); - return this; - } - public Builder clearRemaining() { - bitField0_ = (bitField0_ & ~0x00000008); - remaining_ = 0L; - onChanged(); - return this; - } - - // optional uint64 blockPoolUsed = 5; - private long blockPoolUsed_ ; - public boolean hasBlockPoolUsed() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - public long getBlockPoolUsed() { - return blockPoolUsed_; - } - public Builder setBlockPoolUsed(long value) { - bitField0_ |= 0x00000010; - blockPoolUsed_ = value; - onChanged(); - return this; - } - public Builder clearBlockPoolUsed() { - bitField0_ = (bitField0_ & ~0x00000010); - blockPoolUsed_ = 0L; - onChanged(); - return this; - } - - // optional uint64 lastUpdate = 6; - private long lastUpdate_ ; - public boolean hasLastUpdate() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - public long getLastUpdate() { - return lastUpdate_; - } - public Builder setLastUpdate(long value) { - bitField0_ |= 0x00000020; - lastUpdate_ = value; - onChanged(); - return this; - } - public Builder clearLastUpdate() { - bitField0_ = (bitField0_ & ~0x00000020); - lastUpdate_ = 0L; - onChanged(); - return this; - } - - // optional uint32 xceiverCount = 7; - private int xceiverCount_ ; - public boolean 
hasXceiverCount() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - public int getXceiverCount() { - return xceiverCount_; - } - public Builder setXceiverCount(int value) { - bitField0_ |= 0x00000040; - xceiverCount_ = value; - onChanged(); - return this; - } - public Builder clearXceiverCount() { - bitField0_ = (bitField0_ & ~0x00000040); - xceiverCount_ = 0; - onChanged(); - return this; - } - - // optional string location = 8; - private java.lang.Object location_ = ""; - public boolean hasLocation() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - public String getLocation() { - java.lang.Object ref = location_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - location_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setLocation(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000080; - location_ = value; - onChanged(); - return this; - } - public Builder clearLocation() { - bitField0_ = (bitField0_ & ~0x00000080); - location_ = getDefaultInstance().getLocation(); - onChanged(); - return this; - } - void setLocation(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000080; - location_ = value; - onChanged(); - } - - // optional string hostName = 9; - private java.lang.Object hostName_ = ""; - public boolean hasHostName() { - return ((bitField0_ & 0x00000100) == 0x00000100); - } - public String getHostName() { - java.lang.Object ref = hostName_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - hostName_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setHostName(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000100; - hostName_ = value; - onChanged(); - return this; - } - public Builder clearHostName() { - bitField0_ = (bitField0_ & ~0x00000100); - hostName_ = getDefaultInstance().getHostName(); - onChanged(); - return this; - } - void setHostName(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000100; - hostName_ = value; - onChanged(); - } - - // optional .DatanodeInfoProto.AdminState adminState = 10; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState adminState_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL; - public boolean hasAdminState() { - return ((bitField0_ & 0x00000200) == 0x00000200); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState() { - return adminState_; - } - public Builder setAdminState(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000200; - adminState_ = value; - onChanged(); - return this; - } - public Builder clearAdminState() { - bitField0_ = (bitField0_ & ~0x00000200); - adminState_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:DatanodeInfoProto) - } - - static { - defaultInstance = new DatanodeInfoProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:DatanodeInfoProto) - } - - public interface ContentSummaryProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required uint64 length = 1; - boolean hasLength(); - long getLength(); - 
- // required uint64 fileCount = 2; - boolean hasFileCount(); - long getFileCount(); - - // required uint64 directoryCount = 3; - boolean hasDirectoryCount(); - long getDirectoryCount(); - - // required uint64 quota = 4; - boolean hasQuota(); - long getQuota(); - - // required uint64 spaceConsumed = 5; - boolean hasSpaceConsumed(); - long getSpaceConsumed(); - - // required uint64 spaceQuota = 6; - boolean hasSpaceQuota(); - long getSpaceQuota(); - } - public static final class ContentSummaryProto extends - com.google.protobuf.GeneratedMessage - implements ContentSummaryProtoOrBuilder { - // Use ContentSummaryProto.newBuilder() to construct. - private ContentSummaryProto(Builder builder) { - super(builder); - } - private ContentSummaryProto(boolean noInit) {} - - private static final ContentSummaryProto defaultInstance; - public static ContentSummaryProto getDefaultInstance() { - return defaultInstance; - } - - public ContentSummaryProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_ContentSummaryProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_ContentSummaryProto_fieldAccessorTable; - } - - private int bitField0_; - // required uint64 length = 1; - public static final int LENGTH_FIELD_NUMBER = 1; - private long length_; - public boolean hasLength() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getLength() { - return length_; - } - - // required uint64 fileCount = 2; - public static final int FILECOUNT_FIELD_NUMBER = 2; - private long fileCount_; - public boolean hasFileCount() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getFileCount() { - return fileCount_; - } - - // required uint64 directoryCount = 3; - public static final int DIRECTORYCOUNT_FIELD_NUMBER = 3; - private long directoryCount_; - public boolean hasDirectoryCount() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getDirectoryCount() { - return directoryCount_; - } - - // required uint64 quota = 4; - public static final int QUOTA_FIELD_NUMBER = 4; - private long quota_; - public boolean hasQuota() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public long getQuota() { - return quota_; - } - - // required uint64 spaceConsumed = 5; - public static final int SPACECONSUMED_FIELD_NUMBER = 5; - private long spaceConsumed_; - public boolean hasSpaceConsumed() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - public long getSpaceConsumed() { - return spaceConsumed_; - } - - // required uint64 spaceQuota = 6; - public static final int SPACEQUOTA_FIELD_NUMBER = 6; - private long spaceQuota_; - public boolean hasSpaceQuota() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - public long getSpaceQuota() { - return spaceQuota_; - } - - private void initFields() { - length_ = 0L; - fileCount_ = 0L; - directoryCount_ = 0L; - quota_ = 0L; - spaceConsumed_ = 0L; - spaceQuota_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasLength()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasFileCount()) { - memoizedIsInitialized = 0; - return false; - } - 
if (!hasDirectoryCount()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasQuota()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasSpaceConsumed()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasSpaceQuota()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt64(1, length_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, fileCount_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt64(3, directoryCount_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeUInt64(4, quota_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeUInt64(5, spaceConsumed_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeUInt64(6, spaceQuota_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(1, length_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, fileCount_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(3, directoryCount_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(4, quota_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(5, spaceConsumed_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(6, spaceQuota_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto) obj; - - boolean result = true; - result = result && (hasLength() == other.hasLength()); - if (hasLength()) { - result = result && (getLength() - == other.getLength()); - } - result = result && (hasFileCount() == other.hasFileCount()); - if (hasFileCount()) { - result = result && (getFileCount() - == other.getFileCount()); - } - result = result && (hasDirectoryCount() == other.hasDirectoryCount()); - if (hasDirectoryCount()) { - result = result && (getDirectoryCount() - == other.getDirectoryCount()); - } - result = result && (hasQuota() == other.hasQuota()); - if (hasQuota()) { - result = result && (getQuota() - == other.getQuota()); - } - result = result && (hasSpaceConsumed() == other.hasSpaceConsumed()); - if (hasSpaceConsumed()) { - result = result && (getSpaceConsumed() - 
== other.getSpaceConsumed()); - } - result = result && (hasSpaceQuota() == other.hasSpaceQuota()); - if (hasSpaceQuota()) { - result = result && (getSpaceQuota() - == other.getSpaceQuota()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasLength()) { - hash = (37 * hash) + LENGTH_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getLength()); - } - if (hasFileCount()) { - hash = (37 * hash) + FILECOUNT_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getFileCount()); - } - if (hasDirectoryCount()) { - hash = (37 * hash) + DIRECTORYCOUNT_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getDirectoryCount()); - } - if (hasQuota()) { - hash = (37 * hash) + QUOTA_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getQuota()); - } - if (hasSpaceConsumed()) { - hash = (37 * hash) + SPACECONSUMED_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getSpaceConsumed()); - } - if (hasSpaceQuota()) { - hash = (37 * hash) + SPACEQUOTA_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getSpaceQuota()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } 
else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_ContentSummaryProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_ContentSummaryProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - length_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - fileCount_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - directoryCount_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - quota_ = 0L; - bitField0_ = (bitField0_ & ~0x00000008); - spaceConsumed_ = 0L; - bitField0_ = (bitField0_ & ~0x00000010); - spaceQuota_ = 0L; - bitField0_ = (bitField0_ & ~0x00000020); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.length_ = length_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.fileCount_ = fileCount_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.directoryCount_ = directoryCount_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.quota_ = quota_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.spaceConsumed_ = spaceConsumed_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; - } - result.spaceQuota_ = spaceQuota_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance()) return this; - if (other.hasLength()) { - setLength(other.getLength()); - } - if (other.hasFileCount()) { - setFileCount(other.getFileCount()); - } - if (other.hasDirectoryCount()) { - setDirectoryCount(other.getDirectoryCount()); - } - if (other.hasQuota()) { - setQuota(other.getQuota()); - } - if (other.hasSpaceConsumed()) { - setSpaceConsumed(other.getSpaceConsumed()); - } - if (other.hasSpaceQuota()) { - setSpaceQuota(other.getSpaceQuota()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasLength()) { - - return false; - } - if (!hasFileCount()) { - - return false; - } - if (!hasDirectoryCount()) { - - return false; - } - if (!hasQuota()) { - - return false; - } - if (!hasSpaceConsumed()) { - - return false; - } - if (!hasSpaceQuota()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - length_ = input.readUInt64(); - break; - } - case 16: { - bitField0_ |= 
0x00000002; - fileCount_ = input.readUInt64(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - directoryCount_ = input.readUInt64(); - break; - } - case 32: { - bitField0_ |= 0x00000008; - quota_ = input.readUInt64(); - break; - } - case 40: { - bitField0_ |= 0x00000010; - spaceConsumed_ = input.readUInt64(); - break; - } - case 48: { - bitField0_ |= 0x00000020; - spaceQuota_ = input.readUInt64(); - break; - } - } - } - } - - private int bitField0_; - - // required uint64 length = 1; - private long length_ ; - public boolean hasLength() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getLength() { - return length_; - } - public Builder setLength(long value) { - bitField0_ |= 0x00000001; - length_ = value; - onChanged(); - return this; - } - public Builder clearLength() { - bitField0_ = (bitField0_ & ~0x00000001); - length_ = 0L; - onChanged(); - return this; - } - - // required uint64 fileCount = 2; - private long fileCount_ ; - public boolean hasFileCount() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getFileCount() { - return fileCount_; - } - public Builder setFileCount(long value) { - bitField0_ |= 0x00000002; - fileCount_ = value; - onChanged(); - return this; - } - public Builder clearFileCount() { - bitField0_ = (bitField0_ & ~0x00000002); - fileCount_ = 0L; - onChanged(); - return this; - } - - // required uint64 directoryCount = 3; - private long directoryCount_ ; - public boolean hasDirectoryCount() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getDirectoryCount() { - return directoryCount_; - } - public Builder setDirectoryCount(long value) { - bitField0_ |= 0x00000004; - directoryCount_ = value; - onChanged(); - return this; - } - public Builder clearDirectoryCount() { - bitField0_ = (bitField0_ & ~0x00000004); - directoryCount_ = 0L; - onChanged(); - return this; - } - - // required uint64 quota = 4; - private long quota_ ; - public boolean hasQuota() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public long getQuota() { - return quota_; - } - public Builder setQuota(long value) { - bitField0_ |= 0x00000008; - quota_ = value; - onChanged(); - return this; - } - public Builder clearQuota() { - bitField0_ = (bitField0_ & ~0x00000008); - quota_ = 0L; - onChanged(); - return this; - } - - // required uint64 spaceConsumed = 5; - private long spaceConsumed_ ; - public boolean hasSpaceConsumed() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - public long getSpaceConsumed() { - return spaceConsumed_; - } - public Builder setSpaceConsumed(long value) { - bitField0_ |= 0x00000010; - spaceConsumed_ = value; - onChanged(); - return this; - } - public Builder clearSpaceConsumed() { - bitField0_ = (bitField0_ & ~0x00000010); - spaceConsumed_ = 0L; - onChanged(); - return this; - } - - // required uint64 spaceQuota = 6; - private long spaceQuota_ ; - public boolean hasSpaceQuota() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - public long getSpaceQuota() { - return spaceQuota_; - } - public Builder setSpaceQuota(long value) { - bitField0_ |= 0x00000020; - spaceQuota_ = value; - onChanged(); - return this; - } - public Builder clearSpaceQuota() { - bitField0_ = (bitField0_ & ~0x00000020); - spaceQuota_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:ContentSummaryProto) - } - - static { - defaultInstance = new ContentSummaryProto(true); - defaultInstance.initFields(); - } - - // 
@@protoc_insertion_point(class_scope:ContentSummaryProto) - } - - public interface CorruptFileBlocksProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // repeated string files = 1; - java.util.List<String> getFilesList(); - int getFilesCount(); - String getFiles(int index); - - // required string cookie = 2; - boolean hasCookie(); - String getCookie(); - } - public static final class CorruptFileBlocksProto extends - com.google.protobuf.GeneratedMessage - implements CorruptFileBlocksProtoOrBuilder { - // Use CorruptFileBlocksProto.newBuilder() to construct. - private CorruptFileBlocksProto(Builder builder) { - super(builder); - } - private CorruptFileBlocksProto(boolean noInit) {} - - private static final CorruptFileBlocksProto defaultInstance; - public static CorruptFileBlocksProto getDefaultInstance() { - return defaultInstance; - } - - public CorruptFileBlocksProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_CorruptFileBlocksProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_CorruptFileBlocksProto_fieldAccessorTable; - } - - private int bitField0_; - // repeated string files = 1; - public static final int FILES_FIELD_NUMBER = 1; - private com.google.protobuf.LazyStringList files_; - public java.util.List<String> - getFilesList() { - return files_; - } - public int getFilesCount() { - return files_.size(); - } - public String getFiles(int index) { - return files_.get(index); - } - - // required string cookie = 2; - public static final int COOKIE_FIELD_NUMBER = 2; - private java.lang.Object cookie_; - public boolean hasCookie() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getCookie() { - java.lang.Object ref = cookie_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - cookie_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getCookieBytes() { - java.lang.Object ref = cookie_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - cookie_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - files_ = com.google.protobuf.LazyStringArrayList.EMPTY; - cookie_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasCookie()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < files_.size(); i++) { - output.writeBytes(1, files_.getByteString(i)); - } - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(2, getCookieBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - { - int dataSize = 0; - 
for (int i = 0; i < files_.size(); i++) { - dataSize += com.google.protobuf.CodedOutputStream - .computeBytesSizeNoTag(files_.getByteString(i)); - } - size += dataSize; - size += 1 * getFilesList().size(); - } - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getCookieBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto) obj; - - boolean result = true; - result = result && getFilesList() - .equals(other.getFilesList()); - result = result && (hasCookie() == other.hasCookie()); - if (hasCookie()) { - result = result && getCookie() - .equals(other.getCookie()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getFilesCount() > 0) { - hash = (37 * hash) + FILES_FIELD_NUMBER; - hash = (53 * hash) + getFilesList().hashCode(); - } - if (hasCookie()) { - hash = (37 * hash) + COOKIE_FIELD_NUMBER; - hash = (53 * hash) + getCookie().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_CorruptFileBlocksProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_CorruptFileBlocksProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - files_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - cookie_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto getDefaultInstanceForType() { - return 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - files_ = new com.google.protobuf.UnmodifiableLazyStringList( - files_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.files_ = files_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000001; - } - result.cookie_ = cookie_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance()) return this; - if (!other.files_.isEmpty()) { - if (files_.isEmpty()) { - files_ = other.files_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureFilesIsMutable(); - files_.addAll(other.files_); - } - onChanged(); - } - if (other.hasCookie()) { - setCookie(other.getCookie()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasCookie()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - ensureFilesIsMutable(); - files_.add(input.readBytes()); - break; - } - case 18: { - bitField0_ |= 0x00000002; - cookie_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // repeated string files = 1; - private com.google.protobuf.LazyStringList files_ = com.google.protobuf.LazyStringArrayList.EMPTY; - private void 
ensureFilesIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - files_ = new com.google.protobuf.LazyStringArrayList(files_); - bitField0_ |= 0x00000001; - } - } - public java.util.List<String> - getFilesList() { - return java.util.Collections.unmodifiableList(files_); - } - public int getFilesCount() { - return files_.size(); - } - public String getFiles(int index) { - return files_.get(index); - } - public Builder setFiles( - int index, String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureFilesIsMutable(); - files_.set(index, value); - onChanged(); - return this; - } - public Builder addFiles(String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureFilesIsMutable(); - files_.add(value); - onChanged(); - return this; - } - public Builder addAllFiles( - java.lang.Iterable<String> values) { - ensureFilesIsMutable(); - super.addAll(values, files_); - onChanged(); - return this; - } - public Builder clearFiles() { - files_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - return this; - } - void addFiles(com.google.protobuf.ByteString value) { - ensureFilesIsMutable(); - files_.add(value); - onChanged(); - } - - // required string cookie = 2; - private java.lang.Object cookie_ = ""; - public boolean hasCookie() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getCookie() { - java.lang.Object ref = cookie_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - cookie_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setCookie(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - cookie_ = value; - onChanged(); - return this; - } - public Builder clearCookie() { - bitField0_ = (bitField0_ & ~0x00000002); - cookie_ = getDefaultInstance().getCookie(); - onChanged(); - return this; - } - void setCookie(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000002; - cookie_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:CorruptFileBlocksProto) - } - - static { - defaultInstance = new CorruptFileBlocksProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:CorruptFileBlocksProto) - } - - public interface FsPermissionProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required uint32 perm = 1; - boolean hasPerm(); - int getPerm(); - } - public static final class FsPermissionProto extends - com.google.protobuf.GeneratedMessage - implements FsPermissionProtoOrBuilder { - // Use FsPermissionProto.newBuilder() to construct. 
- private FsPermissionProto(Builder builder) { - super(builder); - } - private FsPermissionProto(boolean noInit) {} - - private static final FsPermissionProto defaultInstance; - public static FsPermissionProto getDefaultInstance() { - return defaultInstance; - } - - public FsPermissionProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_FsPermissionProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_FsPermissionProto_fieldAccessorTable; - } - - private int bitField0_; - // required uint32 perm = 1; - public static final int PERM_FIELD_NUMBER = 1; - private int perm_; - public boolean hasPerm() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public int getPerm() { - return perm_; - } - - private void initFields() { - perm_ = 0; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasPerm()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt32(1, perm_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(1, perm_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto) obj; - - boolean result = true; - result = result && (hasPerm() == other.hasPerm()); - if (hasPerm()) { - result = result && (getPerm() - == other.getPerm()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasPerm()) { - hash = (37 * hash) + PERM_FIELD_NUMBER; - hash = (53 * hash) + getPerm(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom( - com.google.protobuf.ByteString 
data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_FsPermissionProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_FsPermissionProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - perm_ = 0; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.perm_ = perm_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance()) return this; - if (other.hasPerm()) { - setPerm(other.getPerm()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasPerm()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - 
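          // Editor's note on this generated parse loop: a tag of 0 means
          // end-of-stream, so the accumulated unknown fields are flushed and
          // parsing stops; tags the switch does not recognize are preserved
          // via parseUnknownField; the only known tag for this message is 8,
          // i.e. field number 1 (perm) with varint wire type, (1 << 3) | 0.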
return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - perm_ = input.readUInt32(); - break; - } - } - } - } - - private int bitField0_; - - // required uint32 perm = 1; - private int perm_ ; - public boolean hasPerm() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public int getPerm() { - return perm_; - } - public Builder setPerm(int value) { - bitField0_ |= 0x00000001; - perm_ = value; - onChanged(); - return this; - } - public Builder clearPerm() { - bitField0_ = (bitField0_ & ~0x00000001); - perm_ = 0; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:FsPermissionProto) - } - - static { - defaultInstance = new FsPermissionProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:FsPermissionProto) - } - - public interface LocatedBlockProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .ExtendedBlockProto b = 1; - boolean hasB(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder(); - - // required uint64 offset = 2; - boolean hasOffset(); - long getOffset(); - - // repeated .DatanodeInfoProto locs = 3; - java.util.List - getLocsList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index); - int getLocsCount(); - java.util.List - getLocsOrBuilderList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder( - int index); - - // required bool corrupt = 4; - boolean hasCorrupt(); - boolean getCorrupt(); - - // required .BlockTokenIdentifierProto blockToken = 5; - boolean hasBlockToken(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getBlockToken(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getBlockTokenOrBuilder(); - } - public static final class LocatedBlockProto extends - com.google.protobuf.GeneratedMessage - implements LocatedBlockProtoOrBuilder { - // Use LocatedBlockProto.newBuilder() to construct. 
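    // A hedged, editor-added sketch of how the FsPermissionProto message
    // defined above is typically used; it relies only on methods visible in
    // this file plus protobuf's standard toByteArray(). The demo class name
    // is hypothetical and not part of the generated output.
    private static final class FsPermissionProtoRoundTripDemo {
      static void roundTrip() throws com.google.protobuf.InvalidProtocolBufferException {
        FsPermissionProto perm = FsPermissionProto.newBuilder()
            .setPerm(0755)   // octal rwxr-xr-x; 'perm' is the lone required field
            .build();        // build() throws if a required field is unset
        byte[] wire = perm.toByteArray();
        FsPermissionProto back = FsPermissionProto.parseFrom(wire);
        if (back.getPerm() != perm.getPerm()) {
          throw new AssertionError("round trip changed perm");
        }
      }
    }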
- private LocatedBlockProto(Builder builder) { - super(builder); - } - private LocatedBlockProto(boolean noInit) {} - - private static final LocatedBlockProto defaultInstance; - public static LocatedBlockProto getDefaultInstance() { - return defaultInstance; - } - - public LocatedBlockProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_LocatedBlockProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_LocatedBlockProto_fieldAccessorTable; - } - - private int bitField0_; - // required .ExtendedBlockProto b = 1; - public static final int B_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto b_; - public boolean hasB() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB() { - return b_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder() { - return b_; - } - - // required uint64 offset = 2; - public static final int OFFSET_FIELD_NUMBER = 2; - private long offset_; - public boolean hasOffset() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getOffset() { - return offset_; - } - - // repeated .DatanodeInfoProto locs = 3; - public static final int LOCS_FIELD_NUMBER = 3; - private java.util.List locs_; - public java.util.List getLocsList() { - return locs_; - } - public java.util.List - getLocsOrBuilderList() { - return locs_; - } - public int getLocsCount() { - return locs_.size(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index) { - return locs_.get(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder( - int index) { - return locs_.get(index); - } - - // required bool corrupt = 4; - public static final int CORRUPT_FIELD_NUMBER = 4; - private boolean corrupt_; - public boolean hasCorrupt() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public boolean getCorrupt() { - return corrupt_; - } - - // required .BlockTokenIdentifierProto blockToken = 5; - public static final int BLOCKTOKEN_FIELD_NUMBER = 5; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto blockToken_; - public boolean hasBlockToken() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getBlockToken() { - return blockToken_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getBlockTokenOrBuilder() { - return blockToken_; - } - - private void initFields() { - b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - offset_ = 0L; - locs_ = java.util.Collections.emptyList(); - corrupt_ = false; - blockToken_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasB()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasOffset()) { - 
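      // Editor's note: every required field of LocatedBlockProto — b, offset,
      // corrupt, blockToken — is checked with this same pattern (record 0 in
      // memoizedIsInitialized and bail on the first miss), and nested
      // messages, including each repeated locs entry, must themselves be
      // initialized before the result is memoized as 1.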
memoizedIsInitialized = 0; - return false; - } - if (!hasCorrupt()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasBlockToken()) { - memoizedIsInitialized = 0; - return false; - } - if (!getB().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getLocsCount(); i++) { - if (!getLocs(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - if (!getBlockToken().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, b_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, offset_); - } - for (int i = 0; i < locs_.size(); i++) { - output.writeMessage(3, locs_.get(i)); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBool(4, corrupt_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeMessage(5, blockToken_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, b_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, offset_); - } - for (int i = 0; i < locs_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, locs_.get(i)); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(4, corrupt_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(5, blockToken_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto) obj; - - boolean result = true; - result = result && (hasB() == other.hasB()); - if (hasB()) { - result = result && getB() - .equals(other.getB()); - } - result = result && (hasOffset() == other.hasOffset()); - if (hasOffset()) { - result = result && (getOffset() - == other.getOffset()); - } - result = result && getLocsList() - .equals(other.getLocsList()); - result = result && (hasCorrupt() == other.hasCorrupt()); - if (hasCorrupt()) { - result = result && (getCorrupt() - == other.getCorrupt()); - } - result = result && (hasBlockToken() == other.hasBlockToken()); - if (hasBlockToken()) { - result = result && getBlockToken() - .equals(other.getBlockToken()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + 
getDescriptorForType().hashCode(); - if (hasB()) { - hash = (37 * hash) + B_FIELD_NUMBER; - hash = (53 * hash) + getB().hashCode(); - } - if (hasOffset()) { - hash = (37 * hash) + OFFSET_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getOffset()); - } - if (getLocsCount() > 0) { - hash = (37 * hash) + LOCS_FIELD_NUMBER; - hash = (53 * hash) + getLocsList().hashCode(); - } - if (hasCorrupt()) { - hash = (37 * hash) + CORRUPT_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getCorrupt()); - } - if (hasBlockToken()) { - hash = (37 * hash) + BLOCKTOKEN_FIELD_NUMBER; - hash = (53 * hash) + getBlockToken().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - 
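          // Editor's note: each static parseFrom overload in this block
          // delegates to a fresh Builder — newBuilder().mergeFrom(...) — and
          // finishes with buildParsed(), which reports missing required
          // fields as InvalidProtocolBufferException rather than
          // UninitializedMessageException.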
.buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_LocatedBlockProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_LocatedBlockProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBFieldBuilder(); - getLocsFieldBuilder(); - getBlockTokenFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (bBuilder_ == null) { - b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - } else { - bBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - offset_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - if (locsBuilder_ == null) { - locs_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); - } else { - locsBuilder_.clear(); - } - corrupt_ = false; - bitField0_ = (bitField0_ & ~0x00000008); - if (blockTokenBuilder_ == null) { - blockToken_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - } else { - blockTokenBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000010); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto result = buildPartial(); - if (!result.isInitialized()) { - throw 
newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (bBuilder_ == null) { - result.b_ = b_; - } else { - result.b_ = bBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.offset_ = offset_; - if (locsBuilder_ == null) { - if (((bitField0_ & 0x00000004) == 0x00000004)) { - locs_ = java.util.Collections.unmodifiableList(locs_); - bitField0_ = (bitField0_ & ~0x00000004); - } - result.locs_ = locs_; - } else { - result.locs_ = locsBuilder_.build(); - } - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000004; - } - result.corrupt_ = corrupt_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000008; - } - if (blockTokenBuilder_ == null) { - result.blockToken_ = blockToken_; - } else { - result.blockToken_ = blockTokenBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) return this; - if (other.hasB()) { - mergeB(other.getB()); - } - if (other.hasOffset()) { - setOffset(other.getOffset()); - } - if (locsBuilder_ == null) { - if (!other.locs_.isEmpty()) { - if (locs_.isEmpty()) { - locs_ = other.locs_; - bitField0_ = (bitField0_ & ~0x00000004); - } else { - ensureLocsIsMutable(); - locs_.addAll(other.locs_); - } - onChanged(); - } - } else { - if (!other.locs_.isEmpty()) { - if (locsBuilder_.isEmpty()) { - locsBuilder_.dispose(); - locsBuilder_ = null; - locs_ = other.locs_; - bitField0_ = (bitField0_ & ~0x00000004); - locsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
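              // Editor's note: when the incoming list can be adopted
              // wholesale, the existing RepeatedFieldBuilder is disposed and
              // the builder drops back to plain-list mode;
              // alwaysUseFieldBuilders (a test-only switch) forces the field
              // builder to be re-created immediately.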
- getLocsFieldBuilder() : null; - } else { - locsBuilder_.addAllMessages(other.locs_); - } - } - } - if (other.hasCorrupt()) { - setCorrupt(other.getCorrupt()); - } - if (other.hasBlockToken()) { - mergeBlockToken(other.getBlockToken()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasB()) { - - return false; - } - if (!hasOffset()) { - - return false; - } - if (!hasCorrupt()) { - - return false; - } - if (!hasBlockToken()) { - - return false; - } - if (!getB().isInitialized()) { - - return false; - } - for (int i = 0; i < getLocsCount(); i++) { - if (!getLocs(i).isInitialized()) { - - return false; - } - } - if (!getBlockToken().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(); - if (hasB()) { - subBuilder.mergeFrom(getB()); - } - input.readMessage(subBuilder, extensionRegistry); - setB(subBuilder.buildPartial()); - break; - } - case 16: { - bitField0_ |= 0x00000002; - offset_ = input.readUInt64(); - break; - } - case 26: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addLocs(subBuilder.buildPartial()); - break; - } - case 32: { - bitField0_ |= 0x00000008; - corrupt_ = input.readBool(); - break; - } - case 42: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.newBuilder(); - if (hasBlockToken()) { - subBuilder.mergeFrom(getBlockToken()); - } - input.readMessage(subBuilder, extensionRegistry); - setBlockToken(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .ExtendedBlockProto b = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> bBuilder_; - public boolean hasB() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB() { - if (bBuilder_ == null) { - return b_; - } else { - return bBuilder_.getMessage(); - } - } - public Builder setB(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { - if (bBuilder_ == null) { - if (value == null) { - 
throw new NullPointerException(); - } - b_ = value; - onChanged(); - } else { - bBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setB( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) { - if (bBuilder_ == null) { - b_ = builderForValue.build(); - onChanged(); - } else { - bBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeB(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { - if (bBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - b_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) { - b_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(b_).mergeFrom(value).buildPartial(); - } else { - b_ = value; - } - onChanged(); - } else { - bBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearB() { - if (bBuilder_ == null) { - b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - onChanged(); - } else { - bBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getBFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder() { - if (bBuilder_ != null) { - return bBuilder_.getMessageOrBuilder(); - } else { - return b_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> - getBFieldBuilder() { - if (bBuilder_ == null) { - bBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>( - b_, - getParentForChildren(), - isClean()); - b_ = null; - } - return bBuilder_; - } - - // required uint64 offset = 2; - private long offset_ ; - public boolean hasOffset() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getOffset() { - return offset_; - } - public Builder setOffset(long value) { - bitField0_ |= 0x00000002; - offset_ = value; - onChanged(); - return this; - } - public Builder clearOffset() { - bitField0_ = (bitField0_ & ~0x00000002); - offset_ = 0L; - onChanged(); - return this; - } - - // repeated .DatanodeInfoProto locs = 3; - private java.util.List locs_ = - java.util.Collections.emptyList(); - private void ensureLocsIsMutable() { - if (!((bitField0_ & 0x00000004) == 0x00000004)) { - locs_ = new java.util.ArrayList(locs_); - bitField0_ |= 0x00000004; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> locsBuilder_; - - public java.util.List getLocsList() { - if (locsBuilder_ == null) { - return java.util.Collections.unmodifiableList(locs_); - } else { - return 
locsBuilder_.getMessageList(); - } - } - public int getLocsCount() { - if (locsBuilder_ == null) { - return locs_.size(); - } else { - return locsBuilder_.getCount(); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index) { - if (locsBuilder_ == null) { - return locs_.get(index); - } else { - return locsBuilder_.getMessage(index); - } - } - public Builder setLocs( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (locsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureLocsIsMutable(); - locs_.set(index, value); - onChanged(); - } else { - locsBuilder_.setMessage(index, value); - } - return this; - } - public Builder setLocs( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { - if (locsBuilder_ == null) { - ensureLocsIsMutable(); - locs_.set(index, builderForValue.build()); - onChanged(); - } else { - locsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - public Builder addLocs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (locsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureLocsIsMutable(); - locs_.add(value); - onChanged(); - } else { - locsBuilder_.addMessage(value); - } - return this; - } - public Builder addLocs( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) { - if (locsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureLocsIsMutable(); - locs_.add(index, value); - onChanged(); - } else { - locsBuilder_.addMessage(index, value); - } - return this; - } - public Builder addLocs( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { - if (locsBuilder_ == null) { - ensureLocsIsMutable(); - locs_.add(builderForValue.build()); - onChanged(); - } else { - locsBuilder_.addMessage(builderForValue.build()); - } - return this; - } - public Builder addLocs( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) { - if (locsBuilder_ == null) { - ensureLocsIsMutable(); - locs_.add(index, builderForValue.build()); - onChanged(); - } else { - locsBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - public Builder addAllLocs( - java.lang.Iterable values) { - if (locsBuilder_ == null) { - ensureLocsIsMutable(); - super.addAll(values, locs_); - onChanged(); - } else { - locsBuilder_.addAllMessages(values); - } - return this; - } - public Builder clearLocs() { - if (locsBuilder_ == null) { - locs_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); - onChanged(); - } else { - locsBuilder_.clear(); - } - return this; - } - public Builder removeLocs(int index) { - if (locsBuilder_ == null) { - ensureLocsIsMutable(); - locs_.remove(index); - onChanged(); - } else { - locsBuilder_.remove(index); - } - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getLocsBuilder( - int index) { - return getLocsFieldBuilder().getBuilder(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder( - int index) { - if (locsBuilder_ == null) { - return locs_.get(index); } else { - return locsBuilder_.getMessageOrBuilder(index); - } - } - public java.util.List - getLocsOrBuilderList() { - if (locsBuilder_ != null) { 
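        // Editor's note: the builder keeps locs in exactly one of two
        // representations — the plain locs_ list or locsBuilder_ — and every
        // accessor in this section branches on whichever is active.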
- return locsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(locs_); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addLocsBuilder() { - return getLocsFieldBuilder().addBuilder( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addLocsBuilder( - int index) { - return getLocsFieldBuilder().addBuilder( - index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()); - } - public java.util.List - getLocsBuilderList() { - return getLocsFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> - getLocsFieldBuilder() { - if (locsBuilder_ == null) { - locsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>( - locs_, - ((bitField0_ & 0x00000004) == 0x00000004), - getParentForChildren(), - isClean()); - locs_ = null; - } - return locsBuilder_; - } - - // required bool corrupt = 4; - private boolean corrupt_ ; - public boolean hasCorrupt() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public boolean getCorrupt() { - return corrupt_; - } - public Builder setCorrupt(boolean value) { - bitField0_ |= 0x00000008; - corrupt_ = value; - onChanged(); - return this; - } - public Builder clearCorrupt() { - bitField0_ = (bitField0_ & ~0x00000008); - corrupt_ = false; - onChanged(); - return this; - } - - // required .BlockTokenIdentifierProto blockToken = 5; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto blockToken_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder> blockTokenBuilder_; - public boolean hasBlockToken() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getBlockToken() { - if (blockTokenBuilder_ == null) { - return blockToken_; - } else { - return blockTokenBuilder_.getMessage(); - } - } - public Builder setBlockToken(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto value) { - if (blockTokenBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - blockToken_ = value; - onChanged(); - } else { - blockTokenBuilder_.setMessage(value); - } - bitField0_ |= 0x00000010; - return this; - } - public Builder setBlockToken( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder builderForValue) { - if (blockTokenBuilder_ == null) { - blockToken_ = builderForValue.build(); - onChanged(); - } else { - blockTokenBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000010; - return this; - } - public Builder 
mergeBlockToken(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto value) { - if (blockTokenBuilder_ == null) { - if (((bitField0_ & 0x00000010) == 0x00000010) && - blockToken_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance()) { - blockToken_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.newBuilder(blockToken_).mergeFrom(value).buildPartial(); - } else { - blockToken_ = value; - } - onChanged(); - } else { - blockTokenBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000010; - return this; - } - public Builder clearBlockToken() { - if (blockTokenBuilder_ == null) { - blockToken_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance(); - onChanged(); - } else { - blockTokenBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000010); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder getBlockTokenBuilder() { - bitField0_ |= 0x00000010; - onChanged(); - return getBlockTokenFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getBlockTokenOrBuilder() { - if (blockTokenBuilder_ != null) { - return blockTokenBuilder_.getMessageOrBuilder(); - } else { - return blockToken_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder> - getBlockTokenFieldBuilder() { - if (blockTokenBuilder_ == null) { - blockTokenBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder>( - blockToken_, - getParentForChildren(), - isClean()); - blockToken_ = null; - } - return blockTokenBuilder_; - } - - // @@protoc_insertion_point(builder_scope:LocatedBlockProto) - } - - static { - defaultInstance = new LocatedBlockProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:LocatedBlockProto) - } - - public interface LocatedBlocksProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required uint64 fileLength = 1; - boolean hasFileLength(); - long getFileLength(); - - // repeated .LocatedBlockProto blocks = 2; - java.util.List - getBlocksList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index); - int getBlocksCount(); - java.util.List - getBlocksOrBuilderList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder( - int index); - - // required bool underConstruction = 3; - boolean hasUnderConstruction(); - boolean getUnderConstruction(); - - // optional .LocatedBlockProto lastBlock = 4; - boolean hasLastBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getLastBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getLastBlockOrBuilder(); - - // required bool isLastBlockComplete = 5; - boolean hasIsLastBlockComplete(); - boolean getIsLastBlockComplete(); - } - public static final class LocatedBlocksProto extends - com.google.protobuf.GeneratedMessage - implements 
LocatedBlocksProtoOrBuilder { - // Use LocatedBlocksProto.newBuilder() to construct. - private LocatedBlocksProto(Builder builder) { - super(builder); - } - private LocatedBlocksProto(boolean noInit) {} - - private static final LocatedBlocksProto defaultInstance; - public static LocatedBlocksProto getDefaultInstance() { - return defaultInstance; - } - - public LocatedBlocksProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_LocatedBlocksProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_LocatedBlocksProto_fieldAccessorTable; - } - - private int bitField0_; - // required uint64 fileLength = 1; - public static final int FILELENGTH_FIELD_NUMBER = 1; - private long fileLength_; - public boolean hasFileLength() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getFileLength() { - return fileLength_; - } - - // repeated .LocatedBlockProto blocks = 2; - public static final int BLOCKS_FIELD_NUMBER = 2; - private java.util.List blocks_; - public java.util.List getBlocksList() { - return blocks_; - } - public java.util.List - getBlocksOrBuilderList() { - return blocks_; - } - public int getBlocksCount() { - return blocks_.size(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) { - return blocks_.get(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder( - int index) { - return blocks_.get(index); - } - - // required bool underConstruction = 3; - public static final int UNDERCONSTRUCTION_FIELD_NUMBER = 3; - private boolean underConstruction_; - public boolean hasUnderConstruction() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public boolean getUnderConstruction() { - return underConstruction_; - } - - // optional .LocatedBlockProto lastBlock = 4; - public static final int LASTBLOCK_FIELD_NUMBER = 4; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto lastBlock_; - public boolean hasLastBlock() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getLastBlock() { - return lastBlock_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getLastBlockOrBuilder() { - return lastBlock_; - } - - // required bool isLastBlockComplete = 5; - public static final int ISLASTBLOCKCOMPLETE_FIELD_NUMBER = 5; - private boolean isLastBlockComplete_; - public boolean hasIsLastBlockComplete() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public boolean getIsLastBlockComplete() { - return isLastBlockComplete_; - } - - private void initFields() { - fileLength_ = 0L; - blocks_ = java.util.Collections.emptyList(); - underConstruction_ = false; - lastBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); - isLastBlockComplete_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasFileLength()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasUnderConstruction()) { - memoizedIsInitialized = 0; - 
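        // Editor's note: only the three scalars — fileLength,
        // underConstruction, isLastBlockComplete — are required in this
        // message; blocks is repeated and lastBlock is optional, so lastBlock
        // is validated just below only when present.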
return false; - } - if (!hasIsLastBlockComplete()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getBlocksCount(); i++) { - if (!getBlocks(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - if (hasLastBlock()) { - if (!getLastBlock().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt64(1, fileLength_); - } - for (int i = 0; i < blocks_.size(); i++) { - output.writeMessage(2, blocks_.get(i)); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBool(3, underConstruction_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeMessage(4, lastBlock_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeBool(5, isLastBlockComplete_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(1, fileLength_); - } - for (int i = 0; i < blocks_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, blocks_.get(i)); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(3, underConstruction_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, lastBlock_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(5, isLastBlockComplete_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto) obj; - - boolean result = true; - result = result && (hasFileLength() == other.hasFileLength()); - if (hasFileLength()) { - result = result && (getFileLength() - == other.getFileLength()); - } - result = result && getBlocksList() - .equals(other.getBlocksList()); - result = result && (hasUnderConstruction() == other.hasUnderConstruction()); - if (hasUnderConstruction()) { - result = result && (getUnderConstruction() - == other.getUnderConstruction()); - } - result = result && (hasLastBlock() == other.hasLastBlock()); - if (hasLastBlock()) { - result = result && getLastBlock() - .equals(other.getLastBlock()); - } - result = result && (hasIsLastBlockComplete() == other.hasIsLastBlockComplete()); - if (hasIsLastBlockComplete()) { - result = result && (getIsLastBlockComplete() - == other.getIsLastBlockComplete()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - 
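    // A hedged, editor-added sketch: because blocks is repeated and lastBlock
    // is optional, a LocatedBlocksProto is fully initialized once its three
    // required scalars are set. The demo class name and values are
    // hypothetical; the setters are the ones generated in this file.
    private static final class LocatedBlocksProtoBuildDemo {
      static LocatedBlocksProto buildEmptyListing() {
        return LocatedBlocksProto.newBuilder()
            .setFileLength(134217728L)     // required uint64 fileLength = 1
            .setUnderConstruction(false)   // required bool underConstruction = 3
            .setIsLastBlockComplete(true)  // required bool isLastBlockComplete = 5
            .build();                      // succeeds: no blocks or lastBlock needed
      }
    }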
@java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasFileLength()) { - hash = (37 * hash) + FILELENGTH_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getFileLength()); - } - if (getBlocksCount() > 0) { - hash = (37 * hash) + BLOCKS_FIELD_NUMBER; - hash = (53 * hash) + getBlocksList().hashCode(); - } - if (hasUnderConstruction()) { - hash = (37 * hash) + UNDERCONSTRUCTION_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getUnderConstruction()); - } - if (hasLastBlock()) { - hash = (37 * hash) + LASTBLOCK_FIELD_NUMBER; - hash = (53 * hash) + getLastBlock().hashCode(); - } - if (hasIsLastBlockComplete()) { - hash = (37 * hash) + ISLASTBLOCKCOMPLETE_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getIsLastBlockComplete()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom( - 
com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_LocatedBlocksProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_LocatedBlocksProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBlocksFieldBuilder(); - getLastBlockFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - fileLength_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - if (blocksBuilder_ == null) { - blocks_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - } else { - blocksBuilder_.clear(); - } - underConstruction_ = false; - bitField0_ = (bitField0_ & ~0x00000004); - if (lastBlockBuilder_ == null) { - lastBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); - } else { - lastBlockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000008); - isLastBlockComplete_ = false; - bitField0_ = (bitField0_ & ~0x00000010); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto result = buildPartial(); 
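        // Editor's note: buildParsed() exists so the parseFrom entry points
        // can surface an uninitialized message as
        // InvalidProtocolBufferException; the public build() above throws
        // UninitializedMessageException instead.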
- if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.fileLength_ = fileLength_; - if (blocksBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002)) { - blocks_ = java.util.Collections.unmodifiableList(blocks_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.blocks_ = blocks_; - } else { - result.blocks_ = blocksBuilder_.build(); - } - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000002; - } - result.underConstruction_ = underConstruction_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000004; - } - if (lastBlockBuilder_ == null) { - result.lastBlock_ = lastBlock_; - } else { - result.lastBlock_ = lastBlockBuilder_.build(); - } - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000008; - } - result.isLastBlockComplete_ = isLastBlockComplete_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance()) return this; - if (other.hasFileLength()) { - setFileLength(other.getFileLength()); - } - if (blocksBuilder_ == null) { - if (!other.blocks_.isEmpty()) { - if (blocks_.isEmpty()) { - blocks_ = other.blocks_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureBlocksIsMutable(); - blocks_.addAll(other.blocks_); - } - onChanged(); - } - } else { - if (!other.blocks_.isEmpty()) { - if (blocksBuilder_.isEmpty()) { - blocksBuilder_.dispose(); - blocksBuilder_ = null; - blocks_ = other.blocks_; - bitField0_ = (bitField0_ & ~0x00000002); - blocksBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getBlocksFieldBuilder() : null; - } else { - blocksBuilder_.addAllMessages(other.blocks_); - } - } - } - if (other.hasUnderConstruction()) { - setUnderConstruction(other.getUnderConstruction()); - } - if (other.hasLastBlock()) { - mergeLastBlock(other.getLastBlock()); - } - if (other.hasIsLastBlockComplete()) { - setIsLastBlockComplete(other.getIsLastBlockComplete()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasFileLength()) { - - return false; - } - if (!hasUnderConstruction()) { - - return false; - } - if (!hasIsLastBlockComplete()) { - - return false; - } - for (int i = 0; i < getBlocksCount(); i++) { - if (!getBlocks(i).isInitialized()) { - - return false; - } - } - if (hasLastBlock()) { - if (!getLastBlock().isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - fileLength_ = input.readUInt64(); - break; - } - case 18: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addBlocks(subBuilder.buildPartial()); - break; - } - case 24: { - bitField0_ |= 0x00000004; - underConstruction_ = input.readBool(); - break; - } - case 34: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder(); - if (hasLastBlock()) { - subBuilder.mergeFrom(getLastBlock()); - } - input.readMessage(subBuilder, extensionRegistry); - setLastBlock(subBuilder.buildPartial()); - break; - } - case 40: { - bitField0_ |= 0x00000010; - isLastBlockComplete_ = input.readBool(); - break; - } - } - } - } - - private int bitField0_; - - // required uint64 fileLength = 1; - private long fileLength_ ; - public boolean hasFileLength() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getFileLength() { - return fileLength_; - } - public Builder setFileLength(long value) { - bitField0_ |= 0x00000001; - fileLength_ = value; - onChanged(); - return this; - } - public Builder clearFileLength() { - bitField0_ = (bitField0_ & ~0x00000001); - fileLength_ = 0L; - onChanged(); - return this; - } - - // repeated .LocatedBlockProto blocks = 2; - private java.util.List blocks_ = - java.util.Collections.emptyList(); - private void ensureBlocksIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { - blocks_ = new java.util.ArrayList(blocks_); - bitField0_ |= 0x00000002; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> 
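// Illustration (editorial annotation, not part of the patch): the case
// labels in the mergeFrom(CodedInputStream) loop above are raw proto wire
// tags, computed as (fieldNumber << 3) | wireType:
//   8  = (1 << 3) | 0  -> fileLength          (varint)
//   18 = (2 << 3) | 2  -> blocks              (length-delimited message)
//   24 = (3 << 3) | 0  -> underConstruction   (varint)
//   34 = (4 << 3) | 2  -> lastBlock           (length-delimited message)
//   40 = (5 << 3) | 0  -> isLastBlockComplete (varint)
int blocksTag = (2 << 3) | 2;  // == 18, matching `case 18` above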
blocksBuilder_; - - public java.util.List getBlocksList() { - if (blocksBuilder_ == null) { - return java.util.Collections.unmodifiableList(blocks_); - } else { - return blocksBuilder_.getMessageList(); - } - } - public int getBlocksCount() { - if (blocksBuilder_ == null) { - return blocks_.size(); - } else { - return blocksBuilder_.getCount(); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) { - if (blocksBuilder_ == null) { - return blocks_.get(index); - } else { - return blocksBuilder_.getMessage(index); - } - } - public Builder setBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { - if (blocksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureBlocksIsMutable(); - blocks_.set(index, value); - onChanged(); - } else { - blocksBuilder_.setMessage(index, value); - } - return this; - } - public Builder setBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.set(index, builderForValue.build()); - onChanged(); - } else { - blocksBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { - if (blocksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureBlocksIsMutable(); - blocks_.add(value); - onChanged(); - } else { - blocksBuilder_.addMessage(value); - } - return this; - } - public Builder addBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { - if (blocksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureBlocksIsMutable(); - blocks_.add(index, value); - onChanged(); - } else { - blocksBuilder_.addMessage(index, value); - } - return this; - } - public Builder addBlocks( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.add(builderForValue.build()); - onChanged(); - } else { - blocksBuilder_.addMessage(builderForValue.build()); - } - return this; - } - public Builder addBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.add(index, builderForValue.build()); - onChanged(); - } else { - blocksBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - public Builder addAllBlocks( - java.lang.Iterable values) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - super.addAll(values, blocks_); - onChanged(); - } else { - blocksBuilder_.addAllMessages(values); - } - return this; - } - public Builder clearBlocks() { - if (blocksBuilder_ == null) { - blocks_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - } else { - blocksBuilder_.clear(); - } - return this; - } - public Builder removeBlocks(int index) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.remove(index); - onChanged(); - } else { - blocksBuilder_.remove(index); - } - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getBlocksBuilder( - int index) { - return getBlocksFieldBuilder().getBuilder(index); - } - public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder( - int index) { - if (blocksBuilder_ == null) { - return blocks_.get(index); } else { - return blocksBuilder_.getMessageOrBuilder(index); - } - } - public java.util.List - getBlocksOrBuilderList() { - if (blocksBuilder_ != null) { - return blocksBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(blocks_); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder() { - return getBlocksFieldBuilder().addBuilder( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder( - int index) { - return getBlocksFieldBuilder().addBuilder( - index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()); - } - public java.util.List - getBlocksBuilderList() { - return getBlocksFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> - getBlocksFieldBuilder() { - if (blocksBuilder_ == null) { - blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>( - blocks_, - ((bitField0_ & 0x00000002) == 0x00000002), - getParentForChildren(), - isClean()); - blocks_ = null; - } - return blocksBuilder_; - } - - // required bool underConstruction = 3; - private boolean underConstruction_ ; - public boolean hasUnderConstruction() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public boolean getUnderConstruction() { - return underConstruction_; - } - public Builder setUnderConstruction(boolean value) { - bitField0_ |= 0x00000004; - underConstruction_ = value; - onChanged(); - return this; - } - public Builder clearUnderConstruction() { - bitField0_ = (bitField0_ & ~0x00000004); - underConstruction_ = false; - onChanged(); - return this; - } - - // optional .LocatedBlockProto lastBlock = 4; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto lastBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> lastBlockBuilder_; - public boolean hasLastBlock() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getLastBlock() { - if (lastBlockBuilder_ == null) { - return lastBlock_; - } else { - return lastBlockBuilder_.getMessage(); - } - } - public Builder setLastBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { - if (lastBlockBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - lastBlock_ = value; - onChanged(); - } else { - lastBlockBuilder_.setMessage(value); - } - bitField0_ |= 0x00000008; - return this; 
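// Illustration (editorial annotation, not part of the patch): read-side use
// of the repeated- and singular-message accessors above, assuming `blocks`
// is a parsed LocatedBlocksProto instance.
for (HdfsProtos.LocatedBlockProto b : blocks.getBlocksList()) {
  // inspect each located block
}
if (blocks.hasLastBlock()) {  // `lastBlock` is optional: check presence first
  HdfsProtos.LocatedBlockProto last = blocks.getLastBlock();
}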
- } - public Builder setLastBlock( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { - if (lastBlockBuilder_ == null) { - lastBlock_ = builderForValue.build(); - onChanged(); - } else { - lastBlockBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000008; - return this; - } - public Builder mergeLastBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { - if (lastBlockBuilder_ == null) { - if (((bitField0_ & 0x00000008) == 0x00000008) && - lastBlock_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) { - lastBlock_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder(lastBlock_).mergeFrom(value).buildPartial(); - } else { - lastBlock_ = value; - } - onChanged(); - } else { - lastBlockBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000008; - return this; - } - public Builder clearLastBlock() { - if (lastBlockBuilder_ == null) { - lastBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); - onChanged(); - } else { - lastBlockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000008); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getLastBlockBuilder() { - bitField0_ |= 0x00000008; - onChanged(); - return getLastBlockFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getLastBlockOrBuilder() { - if (lastBlockBuilder_ != null) { - return lastBlockBuilder_.getMessageOrBuilder(); - } else { - return lastBlock_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> - getLastBlockFieldBuilder() { - if (lastBlockBuilder_ == null) { - lastBlockBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>( - lastBlock_, - getParentForChildren(), - isClean()); - lastBlock_ = null; - } - return lastBlockBuilder_; - } - - // required bool isLastBlockComplete = 5; - private boolean isLastBlockComplete_ ; - public boolean hasIsLastBlockComplete() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - public boolean getIsLastBlockComplete() { - return isLastBlockComplete_; - } - public Builder setIsLastBlockComplete(boolean value) { - bitField0_ |= 0x00000010; - isLastBlockComplete_ = value; - onChanged(); - return this; - } - public Builder clearIsLastBlockComplete() { - bitField0_ = (bitField0_ & ~0x00000010); - isLastBlockComplete_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:LocatedBlocksProto) - } - - static { - defaultInstance = new LocatedBlocksProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:LocatedBlocksProto) - } - - public interface HdfsFileStatusProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .HdfsFileStatusProto.FileType fileType = 1; - boolean hasFileType(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType getFileType(); - - // required bytes path = 2; - boolean hasPath(); - 
com.google.protobuf.ByteString getPath(); - - // required uint64 length = 3; - boolean hasLength(); - long getLength(); - - // required .FsPermissionProto permission = 4; - boolean hasPermission(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getPermission(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder(); - - // required string owner = 5; - boolean hasOwner(); - String getOwner(); - - // required string group = 6; - boolean hasGroup(); - String getGroup(); - - // required uint64 modification_time = 7; - boolean hasModificationTime(); - long getModificationTime(); - - // required uint64 access_time = 8; - boolean hasAccessTime(); - long getAccessTime(); - - // optional bytes symlink = 9; - boolean hasSymlink(); - com.google.protobuf.ByteString getSymlink(); - - // optional uint32 block_replication = 10; - boolean hasBlockReplication(); - int getBlockReplication(); - - // optional uint64 blocksize = 11; - boolean hasBlocksize(); - long getBlocksize(); - - // optional .LocatedBlocksProto locations = 12; - boolean hasLocations(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder(); - } - public static final class HdfsFileStatusProto extends - com.google.protobuf.GeneratedMessage - implements HdfsFileStatusProtoOrBuilder { - // Use HdfsFileStatusProto.newBuilder() to construct. - private HdfsFileStatusProto(Builder builder) { - super(builder); - } - private HdfsFileStatusProto(boolean noInit) {} - - private static final HdfsFileStatusProto defaultInstance; - public static HdfsFileStatusProto getDefaultInstance() { - return defaultInstance; - } - - public HdfsFileStatusProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_HdfsFileStatusProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_HdfsFileStatusProto_fieldAccessorTable; - } - - public enum FileType - implements com.google.protobuf.ProtocolMessageEnum { - IS_DIR(0, 1), - IS_FILE(1, 2), - IS_SYMLINK(2, 3), - ; - - public static final int IS_DIR_VALUE = 1; - public static final int IS_FILE_VALUE = 2; - public static final int IS_SYMLINK_VALUE = 3; - - - public final int getNumber() { return value; } - - public static FileType valueOf(int value) { - switch (value) { - case 1: return IS_DIR; - case 2: return IS_FILE; - case 3: return IS_SYMLINK; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public FileType findValueByNumber(int number) { - return FileType.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDescriptor().getEnumTypes().get(0); - } - - private static final FileType[] VALUES = { - IS_DIR, IS_FILE, IS_SYMLINK, - }; - - public static FileType valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private FileType(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:HdfsFileStatusProto.FileType) - } - - private int bitField0_; - // required .HdfsFileStatusProto.FileType fileType = 1; - public static final int FILETYPE_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType fileType_; - public boolean hasFileType() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType getFileType() { - return fileType_; - } - - // required bytes path = 2; - public static final int PATH_FIELD_NUMBER = 2; - private com.google.protobuf.ByteString path_; - public boolean hasPath() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public com.google.protobuf.ByteString getPath() { - return path_; - } - - // required uint64 length = 3; - public static final int LENGTH_FIELD_NUMBER = 3; - private long length_; - public boolean hasLength() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getLength() { - return length_; - } - - // required .FsPermissionProto permission = 4; - public static final int PERMISSION_FIELD_NUMBER = 4; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto permission_; - public boolean hasPermission() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getPermission() { - return permission_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() { - return permission_; - } - - // required string owner = 5; - public static final int OWNER_FIELD_NUMBER = 5; - private java.lang.Object owner_; - public boolean hasOwner() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - public String getOwner() { - java.lang.Object ref = owner_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - owner_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getOwnerBytes() { - java.lang.Object ref = owner_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - owner_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string group = 6; - public static final int GROUP_FIELD_NUMBER = 6; - private java.lang.Object group_; - public boolean hasGroup() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - public String getGroup() { - java.lang.Object ref = group_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if 
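// Illustration (editorial annotation, not part of the patch): proto2
// generated enums map wire numbers to constants, and valueOf(int) returns
// null rather than throwing for numbers the schema does not know (see the
// switch in the FileType enum above).
HdfsProtos.HdfsFileStatusProto.FileType known =
    HdfsProtos.HdfsFileStatusProto.FileType.valueOf(2);   // IS_FILE
HdfsProtos.HdfsFileStatusProto.FileType unknown =
    HdfsProtos.HdfsFileStatusProto.FileType.valueOf(99);  // null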
(com.google.protobuf.Internal.isValidUtf8(bs)) { - group_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getGroupBytes() { - java.lang.Object ref = group_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - group_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required uint64 modification_time = 7; - public static final int MODIFICATION_TIME_FIELD_NUMBER = 7; - private long modificationTime_; - public boolean hasModificationTime() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - public long getModificationTime() { - return modificationTime_; - } - - // required uint64 access_time = 8; - public static final int ACCESS_TIME_FIELD_NUMBER = 8; - private long accessTime_; - public boolean hasAccessTime() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - public long getAccessTime() { - return accessTime_; - } - - // optional bytes symlink = 9; - public static final int SYMLINK_FIELD_NUMBER = 9; - private com.google.protobuf.ByteString symlink_; - public boolean hasSymlink() { - return ((bitField0_ & 0x00000100) == 0x00000100); - } - public com.google.protobuf.ByteString getSymlink() { - return symlink_; - } - - // optional uint32 block_replication = 10; - public static final int BLOCK_REPLICATION_FIELD_NUMBER = 10; - private int blockReplication_; - public boolean hasBlockReplication() { - return ((bitField0_ & 0x00000200) == 0x00000200); - } - public int getBlockReplication() { - return blockReplication_; - } - - // optional uint64 blocksize = 11; - public static final int BLOCKSIZE_FIELD_NUMBER = 11; - private long blocksize_; - public boolean hasBlocksize() { - return ((bitField0_ & 0x00000400) == 0x00000400); - } - public long getBlocksize() { - return blocksize_; - } - - // optional .LocatedBlocksProto locations = 12; - public static final int LOCATIONS_FIELD_NUMBER = 12; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto locations_; - public boolean hasLocations() { - return ((bitField0_ & 0x00000800) == 0x00000800); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations() { - return locations_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder() { - return locations_; - } - - private void initFields() { - fileType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR; - path_ = com.google.protobuf.ByteString.EMPTY; - length_ = 0L; - permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); - owner_ = ""; - group_ = ""; - modificationTime_ = 0L; - accessTime_ = 0L; - symlink_ = com.google.protobuf.ByteString.EMPTY; - blockReplication_ = 0; - blocksize_ = 0L; - locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasFileType()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasPath()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasLength()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasPermission()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasOwner()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasGroup()) { - 
memoizedIsInitialized = 0; - return false; - } - if (!hasModificationTime()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasAccessTime()) { - memoizedIsInitialized = 0; - return false; - } - if (!getPermission().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - if (hasLocations()) { - if (!getLocations().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(1, fileType_.getNumber()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, path_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt64(3, length_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeMessage(4, permission_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeBytes(5, getOwnerBytes()); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeBytes(6, getGroupBytes()); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - output.writeUInt64(7, modificationTime_); - } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - output.writeUInt64(8, accessTime_); - } - if (((bitField0_ & 0x00000100) == 0x00000100)) { - output.writeBytes(9, symlink_); - } - if (((bitField0_ & 0x00000200) == 0x00000200)) { - output.writeUInt32(10, blockReplication_); - } - if (((bitField0_ & 0x00000400) == 0x00000400)) { - output.writeUInt64(11, blocksize_); - } - if (((bitField0_ & 0x00000800) == 0x00000800)) { - output.writeMessage(12, locations_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, fileType_.getNumber()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, path_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(3, length_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, permission_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(5, getOwnerBytes()); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(6, getGroupBytes()); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(7, modificationTime_); - } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(8, accessTime_); - } - if (((bitField0_ & 0x00000100) == 0x00000100)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(9, symlink_); - } - if (((bitField0_ & 0x00000200) == 0x00000200)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(10, blockReplication_); - } - if (((bitField0_ & 0x00000400) == 0x00000400)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(11, blocksize_); - } - if (((bitField0_ & 0x00000800) == 0x00000800)) { - size += 
com.google.protobuf.CodedOutputStream - .computeMessageSize(12, locations_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto) obj; - - boolean result = true; - result = result && (hasFileType() == other.hasFileType()); - if (hasFileType()) { - result = result && - (getFileType() == other.getFileType()); - } - result = result && (hasPath() == other.hasPath()); - if (hasPath()) { - result = result && getPath() - .equals(other.getPath()); - } - result = result && (hasLength() == other.hasLength()); - if (hasLength()) { - result = result && (getLength() - == other.getLength()); - } - result = result && (hasPermission() == other.hasPermission()); - if (hasPermission()) { - result = result && getPermission() - .equals(other.getPermission()); - } - result = result && (hasOwner() == other.hasOwner()); - if (hasOwner()) { - result = result && getOwner() - .equals(other.getOwner()); - } - result = result && (hasGroup() == other.hasGroup()); - if (hasGroup()) { - result = result && getGroup() - .equals(other.getGroup()); - } - result = result && (hasModificationTime() == other.hasModificationTime()); - if (hasModificationTime()) { - result = result && (getModificationTime() - == other.getModificationTime()); - } - result = result && (hasAccessTime() == other.hasAccessTime()); - if (hasAccessTime()) { - result = result && (getAccessTime() - == other.getAccessTime()); - } - result = result && (hasSymlink() == other.hasSymlink()); - if (hasSymlink()) { - result = result && getSymlink() - .equals(other.getSymlink()); - } - result = result && (hasBlockReplication() == other.hasBlockReplication()); - if (hasBlockReplication()) { - result = result && (getBlockReplication() - == other.getBlockReplication()); - } - result = result && (hasBlocksize() == other.hasBlocksize()); - if (hasBlocksize()) { - result = result && (getBlocksize() - == other.getBlocksize()); - } - result = result && (hasLocations() == other.hasLocations()); - if (hasLocations()) { - result = result && getLocations() - .equals(other.getLocations()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasFileType()) { - hash = (37 * hash) + FILETYPE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getFileType()); - } - if (hasPath()) { - hash = (37 * hash) + PATH_FIELD_NUMBER; - hash = (53 * hash) + getPath().hashCode(); - } - if (hasLength()) { - hash = (37 * hash) + LENGTH_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getLength()); - } - if (hasPermission()) { - hash = (37 * hash) + PERMISSION_FIELD_NUMBER; - hash = (53 * hash) + getPermission().hashCode(); - } - if (hasOwner()) { - hash = (37 * hash) + OWNER_FIELD_NUMBER; - hash = (53 * hash) + getOwner().hashCode(); - } - if (hasGroup()) { - hash = (37 * hash) + 
GROUP_FIELD_NUMBER; - hash = (53 * hash) + getGroup().hashCode(); - } - if (hasModificationTime()) { - hash = (37 * hash) + MODIFICATION_TIME_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getModificationTime()); - } - if (hasAccessTime()) { - hash = (37 * hash) + ACCESS_TIME_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getAccessTime()); - } - if (hasSymlink()) { - hash = (37 * hash) + SYMLINK_FIELD_NUMBER; - hash = (53 * hash) + getSymlink().hashCode(); - } - if (hasBlockReplication()) { - hash = (37 * hash) + BLOCK_REPLICATION_FIELD_NUMBER; - hash = (53 * hash) + getBlockReplication(); - } - if (hasBlocksize()) { - hash = (37 * hash) + BLOCKSIZE_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getBlocksize()); - } - if (hasLocations()) { - hash = (37 * hash) + LOCATIONS_FIELD_NUMBER; - hash = (53 * hash) + getLocations().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static 
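// Illustration (editorial annotation, not part of the patch): the
// parseFrom / parseDelimitedFrom family above is the read side of the
// builder API. Sketch of a byte[] round trip, assuming `status` is an
// already-built HdfsFileStatusProto:
byte[] wire = status.toByteArray();
HdfsProtos.HdfsFileStatusProto parsed =
    HdfsProtos.HdfsFileStatusProto.parseFrom(wire);
// parseDelimitedFrom additionally reads a leading varint length, so several
// messages can be streamed back-to-back over one InputStream.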
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_HdfsFileStatusProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_HdfsFileStatusProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getPermissionFieldBuilder(); - getLocationsFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - fileType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR; - bitField0_ = (bitField0_ & ~0x00000001); - path_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000002); - length_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - if (permissionBuilder_ == null) { - permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); - } else { - permissionBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000008); - owner_ = ""; - bitField0_ = (bitField0_ & ~0x00000010); - group_ = ""; - bitField0_ = (bitField0_ & ~0x00000020); - modificationTime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000040); - accessTime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000080); - symlink_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000100); - blockReplication_ = 0; - bitField0_ = (bitField0_ & ~0x00000200); - blocksize_ = 0L; - bitField0_ = (bitField0_ & ~0x00000400); - if (locationsBuilder_ == null) { - locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance(); - } else { - locationsBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000800); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDescriptor(); - } - - public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.fileType_ = fileType_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.path_ = path_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.length_ = length_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - if (permissionBuilder_ == null) { - result.permission_ = permission_; - } else { - result.permission_ = permissionBuilder_.build(); - } - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.owner_ = owner_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; - } - result.group_ = group_; - if (((from_bitField0_ & 0x00000040) == 0x00000040)) { - to_bitField0_ |= 0x00000040; - } - result.modificationTime_ = modificationTime_; - if (((from_bitField0_ & 0x00000080) == 0x00000080)) { - to_bitField0_ |= 0x00000080; - } - result.accessTime_ = accessTime_; - if (((from_bitField0_ & 0x00000100) == 0x00000100)) { - to_bitField0_ |= 0x00000100; - } - result.symlink_ = symlink_; - if (((from_bitField0_ & 0x00000200) == 0x00000200)) { - to_bitField0_ |= 0x00000200; - } - result.blockReplication_ = blockReplication_; - if (((from_bitField0_ & 0x00000400) == 0x00000400)) { - to_bitField0_ |= 0x00000400; - } - result.blocksize_ = blocksize_; - if (((from_bitField0_ & 0x00000800) == 0x00000800)) { - to_bitField0_ |= 0x00000800; - } - if (locationsBuilder_ == null) { - result.locations_ = locations_; - } else { - result.locations_ = locationsBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()) return this; - if (other.hasFileType()) { - 
setFileType(other.getFileType()); - } - if (other.hasPath()) { - setPath(other.getPath()); - } - if (other.hasLength()) { - setLength(other.getLength()); - } - if (other.hasPermission()) { - mergePermission(other.getPermission()); - } - if (other.hasOwner()) { - setOwner(other.getOwner()); - } - if (other.hasGroup()) { - setGroup(other.getGroup()); - } - if (other.hasModificationTime()) { - setModificationTime(other.getModificationTime()); - } - if (other.hasAccessTime()) { - setAccessTime(other.getAccessTime()); - } - if (other.hasSymlink()) { - setSymlink(other.getSymlink()); - } - if (other.hasBlockReplication()) { - setBlockReplication(other.getBlockReplication()); - } - if (other.hasBlocksize()) { - setBlocksize(other.getBlocksize()); - } - if (other.hasLocations()) { - mergeLocations(other.getLocations()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasFileType()) { - - return false; - } - if (!hasPath()) { - - return false; - } - if (!hasLength()) { - - return false; - } - if (!hasPermission()) { - - return false; - } - if (!hasOwner()) { - - return false; - } - if (!hasGroup()) { - - return false; - } - if (!hasModificationTime()) { - - return false; - } - if (!hasAccessTime()) { - - return false; - } - if (!getPermission().isInitialized()) { - - return false; - } - if (hasLocations()) { - if (!getLocations().isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - int rawValue = input.readEnum(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - bitField0_ |= 0x00000001; - fileType_ = value; - } - break; - } - case 18: { - bitField0_ |= 0x00000002; - path_ = input.readBytes(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - length_ = input.readUInt64(); - break; - } - case 34: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.newBuilder(); - if (hasPermission()) { - subBuilder.mergeFrom(getPermission()); - } - input.readMessage(subBuilder, extensionRegistry); - setPermission(subBuilder.buildPartial()); - break; - } - case 42: { - bitField0_ |= 0x00000010; - owner_ = input.readBytes(); - break; - } - case 50: { - bitField0_ |= 0x00000020; - group_ = input.readBytes(); - break; - } - case 56: { - bitField0_ |= 0x00000040; - modificationTime_ = input.readUInt64(); - break; - } - case 64: { - bitField0_ |= 0x00000080; - accessTime_ = input.readUInt64(); - break; - } - case 74: { - bitField0_ |= 0x00000100; - symlink_ = input.readBytes(); - break; - } - case 80: { - bitField0_ |= 0x00000200; - blockReplication_ = 
input.readUInt32(); - break; - } - case 88: { - bitField0_ |= 0x00000400; - blocksize_ = input.readUInt64(); - break; - } - case 98: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.newBuilder(); - if (hasLocations()) { - subBuilder.mergeFrom(getLocations()); - } - input.readMessage(subBuilder, extensionRegistry); - setLocations(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .HdfsFileStatusProto.FileType fileType = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType fileType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR; - public boolean hasFileType() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType getFileType() { - return fileType_; - } - public Builder setFileType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - fileType_ = value; - onChanged(); - return this; - } - public Builder clearFileType() { - bitField0_ = (bitField0_ & ~0x00000001); - fileType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR; - onChanged(); - return this; - } - - // required bytes path = 2; - private com.google.protobuf.ByteString path_ = com.google.protobuf.ByteString.EMPTY; - public boolean hasPath() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public com.google.protobuf.ByteString getPath() { - return path_; - } - public Builder setPath(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - path_ = value; - onChanged(); - return this; - } - public Builder clearPath() { - bitField0_ = (bitField0_ & ~0x00000002); - path_ = getDefaultInstance().getPath(); - onChanged(); - return this; - } - - // required uint64 length = 3; - private long length_ ; - public boolean hasLength() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getLength() { - return length_; - } - public Builder setLength(long value) { - bitField0_ |= 0x00000004; - length_ = value; - onChanged(); - return this; - } - public Builder clearLength() { - bitField0_ = (bitField0_ & ~0x00000004); - length_ = 0L; - onChanged(); - return this; - } - - // required .FsPermissionProto permission = 4; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder> permissionBuilder_; - public boolean hasPermission() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getPermission() { - if (permissionBuilder_ == null) { - return permission_; - } else { - return permissionBuilder_.getMessage(); - } - } - public Builder setPermission(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto value) { - if (permissionBuilder_ == null) { - if (value == null) { - throw new 
NullPointerException(); - } - permission_ = value; - onChanged(); - } else { - permissionBuilder_.setMessage(value); - } - bitField0_ |= 0x00000008; - return this; - } - public Builder setPermission( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder builderForValue) { - if (permissionBuilder_ == null) { - permission_ = builderForValue.build(); - onChanged(); - } else { - permissionBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000008; - return this; - } - public Builder mergePermission(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto value) { - if (permissionBuilder_ == null) { - if (((bitField0_ & 0x00000008) == 0x00000008) && - permission_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance()) { - permission_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.newBuilder(permission_).mergeFrom(value).buildPartial(); - } else { - permission_ = value; - } - onChanged(); - } else { - permissionBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000008; - return this; - } - public Builder clearPermission() { - if (permissionBuilder_ == null) { - permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance(); - onChanged(); - } else { - permissionBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000008); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder getPermissionBuilder() { - bitField0_ |= 0x00000008; - onChanged(); - return getPermissionFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() { - if (permissionBuilder_ != null) { - return permissionBuilder_.getMessageOrBuilder(); - } else { - return permission_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder> - getPermissionFieldBuilder() { - if (permissionBuilder_ == null) { - permissionBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder>( - permission_, - getParentForChildren(), - isClean()); - permission_ = null; - } - return permissionBuilder_; - } - - // required string owner = 5; - private java.lang.Object owner_ = ""; - public boolean hasOwner() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - public String getOwner() { - java.lang.Object ref = owner_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - owner_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setOwner(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000010; - owner_ = value; - onChanged(); - return this; - } - public Builder clearOwner() { - bitField0_ = (bitField0_ & ~0x00000010); - owner_ = getDefaultInstance().getOwner(); - onChanged(); - return this; - } - void setOwner(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000010; - owner_ = value; - onChanged(); - } - - // required string group = 6; - private java.lang.Object group_ = ""; - public boolean hasGroup() 
{ - return ((bitField0_ & 0x00000020) == 0x00000020); - } - public String getGroup() { - java.lang.Object ref = group_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - group_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setGroup(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000020; - group_ = value; - onChanged(); - return this; - } - public Builder clearGroup() { - bitField0_ = (bitField0_ & ~0x00000020); - group_ = getDefaultInstance().getGroup(); - onChanged(); - return this; - } - void setGroup(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000020; - group_ = value; - onChanged(); - } - - // required uint64 modification_time = 7; - private long modificationTime_ ; - public boolean hasModificationTime() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - public long getModificationTime() { - return modificationTime_; - } - public Builder setModificationTime(long value) { - bitField0_ |= 0x00000040; - modificationTime_ = value; - onChanged(); - return this; - } - public Builder clearModificationTime() { - bitField0_ = (bitField0_ & ~0x00000040); - modificationTime_ = 0L; - onChanged(); - return this; - } - - // required uint64 access_time = 8; - private long accessTime_ ; - public boolean hasAccessTime() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - public long getAccessTime() { - return accessTime_; - } - public Builder setAccessTime(long value) { - bitField0_ |= 0x00000080; - accessTime_ = value; - onChanged(); - return this; - } - public Builder clearAccessTime() { - bitField0_ = (bitField0_ & ~0x00000080); - accessTime_ = 0L; - onChanged(); - return this; - } - - // optional bytes symlink = 9; - private com.google.protobuf.ByteString symlink_ = com.google.protobuf.ByteString.EMPTY; - public boolean hasSymlink() { - return ((bitField0_ & 0x00000100) == 0x00000100); - } - public com.google.protobuf.ByteString getSymlink() { - return symlink_; - } - public Builder setSymlink(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000100; - symlink_ = value; - onChanged(); - return this; - } - public Builder clearSymlink() { - bitField0_ = (bitField0_ & ~0x00000100); - symlink_ = getDefaultInstance().getSymlink(); - onChanged(); - return this; - } - - // optional uint32 block_replication = 10; - private int blockReplication_ ; - public boolean hasBlockReplication() { - return ((bitField0_ & 0x00000200) == 0x00000200); - } - public int getBlockReplication() { - return blockReplication_; - } - public Builder setBlockReplication(int value) { - bitField0_ |= 0x00000200; - blockReplication_ = value; - onChanged(); - return this; - } - public Builder clearBlockReplication() { - bitField0_ = (bitField0_ & ~0x00000200); - blockReplication_ = 0; - onChanged(); - return this; - } - - // optional uint64 blocksize = 11; - private long blocksize_ ; - public boolean hasBlocksize() { - return ((bitField0_ & 0x00000400) == 0x00000400); - } - public long getBlocksize() { - return blocksize_; - } - public Builder setBlocksize(long value) { - bitField0_ |= 0x00000400; - blocksize_ = value; - onChanged(); - return this; - } - public Builder clearBlocksize() { - bitField0_ = (bitField0_ & ~0x00000400); - blocksize_ = 0L; - onChanged(); - return this; - } - - // optional .LocatedBlocksProto locations = 12; - private 
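// Illustration (editorial annotation, not part of the patch): the setters
// above put together; every `required` field must be set before build().
// The FsPermissionProto `perm` field name is an assumption based on the
// hdfs.proto this patch relocates; values are illustrative.
static HdfsProtos.HdfsFileStatusProto exampleStatus() {
  return HdfsProtos.HdfsFileStatusProto.newBuilder()
      .setFileType(HdfsProtos.HdfsFileStatusProto.FileType.IS_FILE)
      .setPath(com.google.protobuf.ByteString.copyFromUtf8("/user/example/file"))
      .setLength(1024L)
      .setPermission(HdfsProtos.FsPermissionProto.newBuilder()
          .setPerm(0644).build())  // assumed field; octal rw-r--r--
      .setOwner("hdfs")
      .setGroup("supergroup")
      .setModificationTime(0L)
      .setAccessTime(0L)
      .build();
}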
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder> locationsBuilder_; - public boolean hasLocations() { - return ((bitField0_ & 0x00000800) == 0x00000800); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations() { - if (locationsBuilder_ == null) { - return locations_; - } else { - return locationsBuilder_.getMessage(); - } - } - public Builder setLocations(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto value) { - if (locationsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - locations_ = value; - onChanged(); - } else { - locationsBuilder_.setMessage(value); - } - bitField0_ |= 0x00000800; - return this; - } - public Builder setLocations( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder builderForValue) { - if (locationsBuilder_ == null) { - locations_ = builderForValue.build(); - onChanged(); - } else { - locationsBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000800; - return this; - } - public Builder mergeLocations(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto value) { - if (locationsBuilder_ == null) { - if (((bitField0_ & 0x00000800) == 0x00000800) && - locations_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance()) { - locations_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.newBuilder(locations_).mergeFrom(value).buildPartial(); - } else { - locations_ = value; - } - onChanged(); - } else { - locationsBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000800; - return this; - } - public Builder clearLocations() { - if (locationsBuilder_ == null) { - locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance(); - onChanged(); - } else { - locationsBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000800); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder getLocationsBuilder() { - bitField0_ |= 0x00000800; - onChanged(); - return getLocationsFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder() { - if (locationsBuilder_ != null) { - return locationsBuilder_.getMessageOrBuilder(); - } else { - return locations_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder> - getLocationsFieldBuilder() { - if (locationsBuilder_ == null) { - locationsBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder>( - locations_, - getParentForChildren(), - isClean()); - locations_ = null; - } - return locationsBuilder_; - } - - // 
@@protoc_insertion_point(builder_scope:HdfsFileStatusProto) - } - - static { - defaultInstance = new HdfsFileStatusProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:HdfsFileStatusProto) - } - - public interface FsServerDefaultsProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required uint64 blockSize = 1; - boolean hasBlockSize(); - long getBlockSize(); - - // required uint32 bytesPerChecksum = 2; - boolean hasBytesPerChecksum(); - int getBytesPerChecksum(); - - // required uint32 writePacketSize = 3; - boolean hasWritePacketSize(); - int getWritePacketSize(); - - // required uint32 replication = 4; - boolean hasReplication(); - int getReplication(); - - // required uint32 fileBufferSize = 5; - boolean hasFileBufferSize(); - int getFileBufferSize(); - } - public static final class FsServerDefaultsProto extends - com.google.protobuf.GeneratedMessage - implements FsServerDefaultsProtoOrBuilder { - // Use FsServerDefaultsProto.newBuilder() to construct. - private FsServerDefaultsProto(Builder builder) { - super(builder); - } - private FsServerDefaultsProto(boolean noInit) {} - - private static final FsServerDefaultsProto defaultInstance; - public static FsServerDefaultsProto getDefaultInstance() { - return defaultInstance; - } - - public FsServerDefaultsProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_FsServerDefaultsProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_FsServerDefaultsProto_fieldAccessorTable; - } - - private int bitField0_; - // required uint64 blockSize = 1; - public static final int BLOCKSIZE_FIELD_NUMBER = 1; - private long blockSize_; - public boolean hasBlockSize() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getBlockSize() { - return blockSize_; - } - - // required uint32 bytesPerChecksum = 2; - public static final int BYTESPERCHECKSUM_FIELD_NUMBER = 2; - private int bytesPerChecksum_; - public boolean hasBytesPerChecksum() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public int getBytesPerChecksum() { - return bytesPerChecksum_; - } - - // required uint32 writePacketSize = 3; - public static final int WRITEPACKETSIZE_FIELD_NUMBER = 3; - private int writePacketSize_; - public boolean hasWritePacketSize() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public int getWritePacketSize() { - return writePacketSize_; - } - - // required uint32 replication = 4; - public static final int REPLICATION_FIELD_NUMBER = 4; - private int replication_; - public boolean hasReplication() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public int getReplication() { - return replication_; - } - - // required uint32 fileBufferSize = 5; - public static final int FILEBUFFERSIZE_FIELD_NUMBER = 5; - private int fileBufferSize_; - public boolean hasFileBufferSize() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - public int getFileBufferSize() { - return fileBufferSize_; - } - - private void initFields() { - blockSize_ = 0L; - bytesPerChecksum_ = 0; - writePacketSize_ = 0; - replication_ = 0; - fileBufferSize_ = 0; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = 
memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBlockSize()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasBytesPerChecksum()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasWritePacketSize()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasReplication()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasFileBufferSize()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt64(1, blockSize_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt32(2, bytesPerChecksum_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt32(3, writePacketSize_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeUInt32(4, replication_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeUInt32(5, fileBufferSize_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(1, blockSize_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(2, bytesPerChecksum_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(3, writePacketSize_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(4, replication_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(5, fileBufferSize_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto) obj; - - boolean result = true; - result = result && (hasBlockSize() == other.hasBlockSize()); - if (hasBlockSize()) { - result = result && (getBlockSize() - == other.getBlockSize()); - } - result = result && (hasBytesPerChecksum() == other.hasBytesPerChecksum()); - if (hasBytesPerChecksum()) { - result = result && (getBytesPerChecksum() - == other.getBytesPerChecksum()); - } - result = result && (hasWritePacketSize() == other.hasWritePacketSize()); - if (hasWritePacketSize()) { - result = result && (getWritePacketSize() - == other.getWritePacketSize()); - } - result = result && (hasReplication() == other.hasReplication()); - if (hasReplication()) { - result = result && (getReplication() - == other.getReplication()); - } - result = result && (hasFileBufferSize() == other.hasFileBufferSize()); - if 
(hasFileBufferSize()) { - result = result && (getFileBufferSize() - == other.getFileBufferSize()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBlockSize()) { - hash = (37 * hash) + BLOCKSIZE_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getBlockSize()); - } - if (hasBytesPerChecksum()) { - hash = (37 * hash) + BYTESPERCHECKSUM_FIELD_NUMBER; - hash = (53 * hash) + getBytesPerChecksum(); - } - if (hasWritePacketSize()) { - hash = (37 * hash) + WRITEPACKETSIZE_FIELD_NUMBER; - hash = (53 * hash) + getWritePacketSize(); - } - if (hasReplication()) { - hash = (37 * hash) + REPLICATION_FIELD_NUMBER; - hash = (53 * hash) + getReplication(); - } - if (hasFileBufferSize()) { - hash = (37 * hash) + FILEBUFFERSIZE_FIELD_NUMBER; - hash = (53 * hash) + getFileBufferSize(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws 
java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_FsServerDefaultsProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_FsServerDefaultsProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - blockSize_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - bytesPerChecksum_ = 0; - bitField0_ = (bitField0_ & ~0x00000002); - writePacketSize_ = 0; - bitField0_ = (bitField0_ & ~0x00000004); - replication_ = 0; - bitField0_ = (bitField0_ & ~0x00000008); - fileBufferSize_ = 0; - bitField0_ = (bitField0_ & ~0x00000010); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - 
result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.blockSize_ = blockSize_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.bytesPerChecksum_ = bytesPerChecksum_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.writePacketSize_ = writePacketSize_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.replication_ = replication_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.fileBufferSize_ = fileBufferSize_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance()) return this; - if (other.hasBlockSize()) { - setBlockSize(other.getBlockSize()); - } - if (other.hasBytesPerChecksum()) { - setBytesPerChecksum(other.getBytesPerChecksum()); - } - if (other.hasWritePacketSize()) { - setWritePacketSize(other.getWritePacketSize()); - } - if (other.hasReplication()) { - setReplication(other.getReplication()); - } - if (other.hasFileBufferSize()) { - setFileBufferSize(other.getFileBufferSize()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBlockSize()) { - - return false; - } - if (!hasBytesPerChecksum()) { - - return false; - } - if (!hasWritePacketSize()) { - - return false; - } - if (!hasReplication()) { - - return false; - } - if (!hasFileBufferSize()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - blockSize_ = input.readUInt64(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - bytesPerChecksum_ = input.readUInt32(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - writePacketSize_ = input.readUInt32(); - break; - } - case 32: { - bitField0_ |= 0x00000008; - replication_ = input.readUInt32(); - break; - } - case 40: { - bitField0_ |= 0x00000010; - 
-              fileBufferSize_ = input.readUInt32();
-              break;
-            }
-          }
-        }
-      }
-
-      private int bitField0_;
-
-      // required uint64 blockSize = 1;
-      private long blockSize_ ;
-      public boolean hasBlockSize() {
-        return ((bitField0_ & 0x00000001) == 0x00000001);
-      }
-      public long getBlockSize() {
-        return blockSize_;
-      }
-      public Builder setBlockSize(long value) {
-        bitField0_ |= 0x00000001;
-        blockSize_ = value;
-        onChanged();
-        return this;
-      }
-      public Builder clearBlockSize() {
-        bitField0_ = (bitField0_ & ~0x00000001);
-        blockSize_ = 0L;
-        onChanged();
-        return this;
-      }
-
-      // required uint32 bytesPerChecksum = 2;
-      private int bytesPerChecksum_ ;
-      public boolean hasBytesPerChecksum() {
-        return ((bitField0_ & 0x00000002) == 0x00000002);
-      }
-      public int getBytesPerChecksum() {
-        return bytesPerChecksum_;
-      }
-      public Builder setBytesPerChecksum(int value) {
-        bitField0_ |= 0x00000002;
-        bytesPerChecksum_ = value;
-        onChanged();
-        return this;
-      }
-      public Builder clearBytesPerChecksum() {
-        bitField0_ = (bitField0_ & ~0x00000002);
-        bytesPerChecksum_ = 0;
-        onChanged();
-        return this;
-      }
-
-      // required uint32 writePacketSize = 3;
-      private int writePacketSize_ ;
-      public boolean hasWritePacketSize() {
-        return ((bitField0_ & 0x00000004) == 0x00000004);
-      }
-      public int getWritePacketSize() {
-        return writePacketSize_;
-      }
-      public Builder setWritePacketSize(int value) {
-        bitField0_ |= 0x00000004;
-        writePacketSize_ = value;
-        onChanged();
-        return this;
-      }
-      public Builder clearWritePacketSize() {
-        bitField0_ = (bitField0_ & ~0x00000004);
-        writePacketSize_ = 0;
-        onChanged();
-        return this;
-      }
-
-      // required uint32 replication = 4;
-      private int replication_ ;
-      public boolean hasReplication() {
-        return ((bitField0_ & 0x00000008) == 0x00000008);
-      }
-      public int getReplication() {
-        return replication_;
-      }
-      public Builder setReplication(int value) {
-        bitField0_ |= 0x00000008;
-        replication_ = value;
-        onChanged();
-        return this;
-      }
-      public Builder clearReplication() {
-        bitField0_ = (bitField0_ & ~0x00000008);
-        replication_ = 0;
-        onChanged();
-        return this;
-      }
-
-      // required uint32 fileBufferSize = 5;
-      private int fileBufferSize_ ;
-      public boolean hasFileBufferSize() {
-        return ((bitField0_ & 0x00000010) == 0x00000010);
-      }
-      public int getFileBufferSize() {
-        return fileBufferSize_;
-      }
-      public Builder setFileBufferSize(int value) {
-        bitField0_ |= 0x00000010;
-        fileBufferSize_ = value;
-        onChanged();
-        return this;
-      }
-      public Builder clearFileBufferSize() {
-        bitField0_ = (bitField0_ & ~0x00000010);
-        fileBufferSize_ = 0;
-        onChanged();
-        return this;
-      }
-
-      // @@protoc_insertion_point(builder_scope:FsServerDefaultsProto)
-    }
-
-    static {
-      defaultInstance = new FsServerDefaultsProto(true);
-      defaultInstance.initFields();
-    }
-
-    // @@protoc_insertion_point(class_scope:FsServerDefaultsProto)
-  }
-
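The FsServerDefaultsProto message whose generated source ends above is not changed by this patch; the identical class is simply regenerated at build time by the new maven-antrun protoc step instead of being checked in. As a minimal sketch of how callers drive this generated protobuf builder API (the class name comes from the deleted code; the field values are illustrative only, not asserted HDFS defaults):

    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto;

    public class FsServerDefaultsSketch {
      public static void main(String[] args) throws Exception {
        // All five fields are "required": build() throws an
        // UninitializedMessageException if any of them is left unset,
        // mirroring the hasX()/isInitialized() checks generated above.
        FsServerDefaultsProto defaults = FsServerDefaultsProto.newBuilder()
            .setBlockSize(64L * 1024 * 1024)   // illustrative values
            .setBytesPerChecksum(512)
            .setWritePacketSize(64 * 1024)
            .setReplication(3)
            .setFileBufferSize(4096)
            .build();

        // Round-trip through the wire format, as an RPC layer would.
        byte[] wire = defaults.toByteArray();
        FsServerDefaultsProto parsed = FsServerDefaultsProto.parseFrom(wire);
        assert parsed.hasBlockSize() && parsed.getReplication() == 3;
      }
    }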
-  public interface DirectoryListingProtoOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
-
-    // repeated .HdfsFileStatusProto partialListing = 1;
-    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto>
-        getPartialListingList();
-    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index);
-    int getPartialListingCount();
-    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>
-        getPartialListingOrBuilderList();
-    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder(
-        int index);
-
-    // required uint32 remainingEntries = 2;
-    boolean hasRemainingEntries();
-    int getRemainingEntries();
-  }
-  public static final class DirectoryListingProto extends
-      com.google.protobuf.GeneratedMessage
-      implements DirectoryListingProtoOrBuilder {
-    // Use DirectoryListingProto.newBuilder() to construct.
-    private DirectoryListingProto(Builder builder) {
-      super(builder);
-    }
-    private DirectoryListingProto(boolean noInit) {}
-
-    private static final DirectoryListingProto defaultInstance;
-    public static DirectoryListingProto getDefaultInstance() {
-      return defaultInstance;
-    }
-
-    public DirectoryListingProto getDefaultInstanceForType() {
-      return defaultInstance;
-    }
-
-    public static final com.google.protobuf.Descriptors.Descriptor
-        getDescriptor() {
-      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DirectoryListingProto_descriptor;
-    }
-
-    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-        internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DirectoryListingProto_fieldAccessorTable;
-    }
-
-    private int bitField0_;
-    // repeated .HdfsFileStatusProto partialListing = 1;
-    public static final int PARTIALLISTING_FIELD_NUMBER = 1;
-    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> partialListing_;
-    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> getPartialListingList() {
-      return partialListing_;
-    }
-    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>
-        getPartialListingOrBuilderList() {
-      return partialListing_;
-    }
-    public int getPartialListingCount() {
-      return partialListing_.size();
-    }
-    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index) {
-      return partialListing_.get(index);
-    }
-    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder(
-        int index) {
-      return partialListing_.get(index);
-    }
-
-    // required uint32 remainingEntries = 2;
-    public static final int REMAININGENTRIES_FIELD_NUMBER = 2;
-    private int remainingEntries_;
-    public boolean hasRemainingEntries() {
-      return ((bitField0_ & 0x00000001) == 0x00000001);
-    }
-    public int getRemainingEntries() {
-      return remainingEntries_;
-    }
-
-    private void initFields() {
-      partialListing_ = java.util.Collections.emptyList();
-      remainingEntries_ = 0;
-    }
-    private byte memoizedIsInitialized = -1;
-    public final boolean isInitialized() {
-      byte isInitialized = memoizedIsInitialized;
-      if (isInitialized != -1) return isInitialized == 1;
-
-      if (!hasRemainingEntries()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
-      for (int i = 0; i < getPartialListingCount(); i++) {
-        if (!getPartialListing(i).isInitialized()) {
-          memoizedIsInitialized = 0;
-          return false;
-        }
-      }
-      memoizedIsInitialized = 1;
-      return true;
-    }
-
-    public void writeTo(com.google.protobuf.CodedOutputStream output)
-        throws java.io.IOException {
-      getSerializedSize();
-      for (int i = 0; i < partialListing_.size(); i++) {
-        output.writeMessage(1, partialListing_.get(i));
-      }
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeUInt32(2, remainingEntries_);
-      }
-      getUnknownFields().writeTo(output);
-    }
-
-    private int memoizedSerializedSize = -1;
-    public int getSerializedSize() {
-      int size = memoizedSerializedSize;
-      if (size != -1) return size;
-
-      size = 0;
-      for (int i = 0; i < partialListing_.size(); i++) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeMessageSize(1, partialListing_.get(i));
-      }
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeUInt32Size(2, remainingEntries_);
-      }
-      size += getUnknownFields().getSerializedSize();
-      memoizedSerializedSize = size;
-      return size;
-    }
-
-    private static final long
serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto) obj; - - boolean result = true; - result = result && getPartialListingList() - .equals(other.getPartialListingList()); - result = result && (hasRemainingEntries() == other.hasRemainingEntries()); - if (hasRemainingEntries()) { - result = result && (getRemainingEntries() - == other.getRemainingEntries()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getPartialListingCount() > 0) { - hash = (37 * hash) + PARTIALLISTING_FIELD_NUMBER; - hash = (53 * hash) + getPartialListingList().hashCode(); - } - if (hasRemainingEntries()) { - hash = (37 * hash) + REMAININGENTRIES_FIELD_NUMBER; - hash = (53 * hash) + getRemainingEntries(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseDelimitedFrom( - java.io.InputStream 
input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DirectoryListingProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DirectoryListingProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getPartialListingFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (partialListingBuilder_ == null) { - partialListing_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - } else { - partialListingBuilder_.clear(); - } - remainingEntries_ = 0; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return 
result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (partialListingBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - partialListing_ = java.util.Collections.unmodifiableList(partialListing_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.partialListing_ = partialListing_; - } else { - result.partialListing_ = partialListingBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000001; - } - result.remainingEntries_ = remainingEntries_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDefaultInstance()) return this; - if (partialListingBuilder_ == null) { - if (!other.partialListing_.isEmpty()) { - if (partialListing_.isEmpty()) { - partialListing_ = other.partialListing_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensurePartialListingIsMutable(); - partialListing_.addAll(other.partialListing_); - } - onChanged(); - } - } else { - if (!other.partialListing_.isEmpty()) { - if (partialListingBuilder_.isEmpty()) { - partialListingBuilder_.dispose(); - partialListingBuilder_ = null; - partialListing_ = other.partialListing_; - bitField0_ = (bitField0_ & ~0x00000001); - partialListingBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getPartialListingFieldBuilder() : null; - } else { - partialListingBuilder_.addAllMessages(other.partialListing_); - } - } - } - if (other.hasRemainingEntries()) { - setRemainingEntries(other.getRemainingEntries()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasRemainingEntries()) { - - return false; - } - for (int i = 0; i < getPartialListingCount(); i++) { - if (!getPartialListing(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addPartialListing(subBuilder.buildPartial()); - break; - } - case 16: { - bitField0_ |= 0x00000002; - remainingEntries_ = input.readUInt32(); - break; - } - } - } - } - - private int bitField0_; - - // repeated .HdfsFileStatusProto partialListing = 1; - private java.util.List partialListing_ = - java.util.Collections.emptyList(); - private void ensurePartialListingIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - partialListing_ = new java.util.ArrayList(partialListing_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> partialListingBuilder_; - - public java.util.List getPartialListingList() { - if (partialListingBuilder_ == null) { - return java.util.Collections.unmodifiableList(partialListing_); - } else { - return partialListingBuilder_.getMessageList(); - } - } - public int getPartialListingCount() { - if (partialListingBuilder_ == null) { - return partialListing_.size(); - } else { - return partialListingBuilder_.getCount(); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index) { - if (partialListingBuilder_ == null) { - return partialListing_.get(index); - } else { - return partialListingBuilder_.getMessage(index); - } - } - public Builder setPartialListing( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { - if (partialListingBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensurePartialListingIsMutable(); - partialListing_.set(index, value); - onChanged(); - } else { - partialListingBuilder_.setMessage(index, value); - } - return this; - } - public Builder setPartialListing( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { - if (partialListingBuilder_ == null) { - ensurePartialListingIsMutable(); - 
partialListing_.set(index, builderForValue.build()); - onChanged(); - } else { - partialListingBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - public Builder addPartialListing(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { - if (partialListingBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensurePartialListingIsMutable(); - partialListing_.add(value); - onChanged(); - } else { - partialListingBuilder_.addMessage(value); - } - return this; - } - public Builder addPartialListing( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) { - if (partialListingBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensurePartialListingIsMutable(); - partialListing_.add(index, value); - onChanged(); - } else { - partialListingBuilder_.addMessage(index, value); - } - return this; - } - public Builder addPartialListing( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { - if (partialListingBuilder_ == null) { - ensurePartialListingIsMutable(); - partialListing_.add(builderForValue.build()); - onChanged(); - } else { - partialListingBuilder_.addMessage(builderForValue.build()); - } - return this; - } - public Builder addPartialListing( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) { - if (partialListingBuilder_ == null) { - ensurePartialListingIsMutable(); - partialListing_.add(index, builderForValue.build()); - onChanged(); - } else { - partialListingBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - public Builder addAllPartialListing( - java.lang.Iterable values) { - if (partialListingBuilder_ == null) { - ensurePartialListingIsMutable(); - super.addAll(values, partialListing_); - onChanged(); - } else { - partialListingBuilder_.addAllMessages(values); - } - return this; - } - public Builder clearPartialListing() { - if (partialListingBuilder_ == null) { - partialListing_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - partialListingBuilder_.clear(); - } - return this; - } - public Builder removePartialListing(int index) { - if (partialListingBuilder_ == null) { - ensurePartialListingIsMutable(); - partialListing_.remove(index); - onChanged(); - } else { - partialListingBuilder_.remove(index); - } - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder getPartialListingBuilder( - int index) { - return getPartialListingFieldBuilder().getBuilder(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder( - int index) { - if (partialListingBuilder_ == null) { - return partialListing_.get(index); } else { - return partialListingBuilder_.getMessageOrBuilder(index); - } - } - public java.util.List - getPartialListingOrBuilderList() { - if (partialListingBuilder_ != null) { - return partialListingBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(partialListing_); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder addPartialListingBuilder() { - return getPartialListingFieldBuilder().addBuilder( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()); - } - public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder addPartialListingBuilder( - int index) { - return getPartialListingFieldBuilder().addBuilder( - index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()); - } - public java.util.List - getPartialListingBuilderList() { - return getPartialListingFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> - getPartialListingFieldBuilder() { - if (partialListingBuilder_ == null) { - partialListingBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>( - partialListing_, - ((bitField0_ & 0x00000001) == 0x00000001), - getParentForChildren(), - isClean()); - partialListing_ = null; - } - return partialListingBuilder_; - } - - // required uint32 remainingEntries = 2; - private int remainingEntries_ ; - public boolean hasRemainingEntries() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public int getRemainingEntries() { - return remainingEntries_; - } - public Builder setRemainingEntries(int value) { - bitField0_ |= 0x00000002; - remainingEntries_ = value; - onChanged(); - return this; - } - public Builder clearRemainingEntries() { - bitField0_ = (bitField0_ & ~0x00000002); - remainingEntries_ = 0; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:DirectoryListingProto) - } - - static { - defaultInstance = new DirectoryListingProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:DirectoryListingProto) - } - - public interface UpgradeStatusReportProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required uint32 version = 1; - boolean hasVersion(); - int getVersion(); - - // required uint32 upgradeStatus = 2; - boolean hasUpgradeStatus(); - int getUpgradeStatus(); - } - public static final class UpgradeStatusReportProto extends - com.google.protobuf.GeneratedMessage - implements UpgradeStatusReportProtoOrBuilder { - // Use UpgradeStatusReportProto.newBuilder() to construct. 
- private UpgradeStatusReportProto(Builder builder) { - super(builder); - } - private UpgradeStatusReportProto(boolean noInit) {} - - private static final UpgradeStatusReportProto defaultInstance; - public static UpgradeStatusReportProto getDefaultInstance() { - return defaultInstance; - } - - public UpgradeStatusReportProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_UpgradeStatusReportProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_UpgradeStatusReportProto_fieldAccessorTable; - } - - private int bitField0_; - // required uint32 version = 1; - public static final int VERSION_FIELD_NUMBER = 1; - private int version_; - public boolean hasVersion() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public int getVersion() { - return version_; - } - - // required uint32 upgradeStatus = 2; - public static final int UPGRADESTATUS_FIELD_NUMBER = 2; - private int upgradeStatus_; - public boolean hasUpgradeStatus() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public int getUpgradeStatus() { - return upgradeStatus_; - } - - private void initFields() { - version_ = 0; - upgradeStatus_ = 0; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasVersion()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasUpgradeStatus()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt32(1, version_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt32(2, upgradeStatus_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(1, version_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(2, upgradeStatus_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto) obj; - - boolean result = true; - result = result && (hasVersion() == other.hasVersion()); - if (hasVersion()) { - result = result && (getVersion() - == other.getVersion()); - } - result = result && 
(hasUpgradeStatus() == other.hasUpgradeStatus()); - if (hasUpgradeStatus()) { - result = result && (getUpgradeStatus() - == other.getUpgradeStatus()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasVersion()) { - hash = (37 * hash) + VERSION_FIELD_NUMBER; - hash = (53 * hash) + getVersion(); - } - if (hasUpgradeStatus()) { - hash = (37 * hash) + UPGRADESTATUS_FIELD_NUMBER; - hash = (53 * hash) + getUpgradeStatus(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return 
newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_UpgradeStatusReportProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_UpgradeStatusReportProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - version_ = 0; - bitField0_ = (bitField0_ & ~0x00000001); - upgradeStatus_ = 0; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.version_ = version_; - if 
(((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.upgradeStatus_ = upgradeStatus_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.getDefaultInstance()) return this; - if (other.hasVersion()) { - setVersion(other.getVersion()); - } - if (other.hasUpgradeStatus()) { - setUpgradeStatus(other.getUpgradeStatus()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasVersion()) { - - return false; - } - if (!hasUpgradeStatus()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - version_ = input.readUInt32(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - upgradeStatus_ = input.readUInt32(); - break; - } - } - } - } - - private int bitField0_; - - // required uint32 version = 1; - private int version_ ; - public boolean hasVersion() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public int getVersion() { - return version_; - } - public Builder setVersion(int value) { - bitField0_ |= 0x00000001; - version_ = value; - onChanged(); - return this; - } - public Builder clearVersion() { - bitField0_ = (bitField0_ & ~0x00000001); - version_ = 0; - onChanged(); - return this; - } - - // required uint32 upgradeStatus = 2; - private int upgradeStatus_ ; - public boolean hasUpgradeStatus() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public int getUpgradeStatus() { - return upgradeStatus_; - } - public Builder setUpgradeStatus(int value) { - bitField0_ |= 0x00000002; - upgradeStatus_ = value; - onChanged(); - return this; - } - public Builder clearUpgradeStatus() { - bitField0_ = (bitField0_ & ~0x00000002); - upgradeStatus_ = 0; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:UpgradeStatusReportProto) - } - - static { - defaultInstance = new UpgradeStatusReportProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:UpgradeStatusReportProto) - } - - public interface StorageInfoProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required uint32 layoutVersion = 1; - boolean hasLayoutVersion(); - int getLayoutVersion(); - - // required uint32 namespceID = 2; - boolean hasNamespceID(); - int getNamespceID(); - - // required string clusterID = 3; - boolean 
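// Illustrative sketch only (editor-added, not part of the generated file):
// how client code typically uses the UpgradeStatusReportProto API deleted
// above: build with the generated Builder, serialize, and parse back with
// one of the static parseFrom() overloads. Field values are placeholders.
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto;

public class UpgradeStatusRoundTrip {
  public static void main(String[] args) throws Exception {
    UpgradeStatusReportProto report = UpgradeStatusReportProto.newBuilder()
        .setVersion(1)           // required uint32 version = 1
        .setUpgradeStatus(42)    // required uint32 upgradeStatus = 2
        .build();

    // Round trip through bytes; parseFrom(byte[]) is one of the generated
    // static factories shown above.
    byte[] wire = report.toByteArray();
    UpgradeStatusReportProto parsed = UpgradeStatusReportProto.parseFrom(wire);

    // The generated equals()/hashCode() compare field by field, so the
    // round-tripped message is value-equal to the original.
    System.out.println(report.equals(parsed));                    // true
    System.out.println(report.hashCode() == parsed.hashCode());   // true
  }
}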
hasClusterID(); - String getClusterID(); - - // required uint64 cTime = 4; - boolean hasCTime(); - long getCTime(); - } - public static final class StorageInfoProto extends - com.google.protobuf.GeneratedMessage - implements StorageInfoProtoOrBuilder { - // Use StorageInfoProto.newBuilder() to construct. - private StorageInfoProto(Builder builder) { - super(builder); - } - private StorageInfoProto(boolean noInit) {} - - private static final StorageInfoProto defaultInstance; - public static StorageInfoProto getDefaultInstance() { - return defaultInstance; - } - - public StorageInfoProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_StorageInfoProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_StorageInfoProto_fieldAccessorTable; - } - - private int bitField0_; - // required uint32 layoutVersion = 1; - public static final int LAYOUTVERSION_FIELD_NUMBER = 1; - private int layoutVersion_; - public boolean hasLayoutVersion() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public int getLayoutVersion() { - return layoutVersion_; - } - - // required uint32 namespceID = 2; - public static final int NAMESPCEID_FIELD_NUMBER = 2; - private int namespceID_; - public boolean hasNamespceID() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public int getNamespceID() { - return namespceID_; - } - - // required string clusterID = 3; - public static final int CLUSTERID_FIELD_NUMBER = 3; - private java.lang.Object clusterID_; - public boolean hasClusterID() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public String getClusterID() { - java.lang.Object ref = clusterID_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - clusterID_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getClusterIDBytes() { - java.lang.Object ref = clusterID_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - clusterID_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required uint64 cTime = 4; - public static final int CTIME_FIELD_NUMBER = 4; - private long cTime_; - public boolean hasCTime() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public long getCTime() { - return cTime_; - } - - private void initFields() { - layoutVersion_ = 0; - namespceID_ = 0; - clusterID_ = ""; - cTime_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasLayoutVersion()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasNamespceID()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasClusterID()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasCTime()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if 
(((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt32(1, layoutVersion_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt32(2, namespceID_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, getClusterIDBytes()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeUInt64(4, cTime_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(1, layoutVersion_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(2, namespceID_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getClusterIDBytes()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(4, cTime_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto) obj; - - boolean result = true; - result = result && (hasLayoutVersion() == other.hasLayoutVersion()); - if (hasLayoutVersion()) { - result = result && (getLayoutVersion() - == other.getLayoutVersion()); - } - result = result && (hasNamespceID() == other.hasNamespceID()); - if (hasNamespceID()) { - result = result && (getNamespceID() - == other.getNamespceID()); - } - result = result && (hasClusterID() == other.hasClusterID()); - if (hasClusterID()) { - result = result && getClusterID() - .equals(other.getClusterID()); - } - result = result && (hasCTime() == other.hasCTime()); - if (hasCTime()) { - result = result && (getCTime() - == other.getCTime()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasLayoutVersion()) { - hash = (37 * hash) + LAYOUTVERSION_FIELD_NUMBER; - hash = (53 * hash) + getLayoutVersion(); - } - if (hasNamespceID()) { - hash = (37 * hash) + NAMESPCEID_FIELD_NUMBER; - hash = (53 * hash) + getNamespceID(); - } - if (hasClusterID()) { - hash = (37 * hash) + CLUSTERID_FIELD_NUMBER; - hash = (53 * hash) + getClusterID().hashCode(); - } - if (hasCTime()) { - hash = (37 * hash) + CTIME_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getCTime()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static 
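// Illustrative sketch only (editor-added): the required-field contract
// visible in StorageInfoProto.isInitialized() above and enforced by the
// Builder's build() further below. All four fields are required (the field
// really is spelled "namespceID" in the source hdfs.proto), so build()
// throws when any is unset, while buildPartial() skips the check.
import com.google.protobuf.UninitializedMessageException;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;

public class StorageInfoValidation {
  public static void main(String[] args) {
    StorageInfoProto.Builder b = StorageInfoProto.newBuilder()
        .setLayoutVersion(38)
        .setNamespceID(12345);   // clusterID and cTime still unset

    try {
      b.build();                 // throws: required fields are missing
    } catch (UninitializedMessageException expected) {
      System.out.println("missing: " + expected.getMessage());
    }

    StorageInfoProto partial = b.buildPartial();   // no check performed
    System.out.println(partial.isInitialized());   // false

    StorageInfoProto full =
        b.setClusterID("CID-example").setCTime(0L).build();
    System.out.println(full.isInitialized());      // true
  }
}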
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_StorageInfoProto_descriptor; - } - - protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_StorageInfoProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - layoutVersion_ = 0; - bitField0_ = (bitField0_ & ~0x00000001); - namespceID_ = 0; - bitField0_ = (bitField0_ & ~0x00000002); - clusterID_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - cTime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000008); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.layoutVersion_ = layoutVersion_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.namespceID_ = namespceID_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.clusterID_ = clusterID_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.cTime_ = cTime_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance()) return this; - if 
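// Illustrative sketch only (editor-added): streaming several
// StorageInfoProto records with the length-delimited helpers. The
// parseDelimitedFrom() factories generated above return null at
// end-of-stream (mergeDelimitedFrom() returned false), which is the usual
// loop-termination idiom; writeDelimitedTo() comes from the protobuf
// runtime base class rather than this generated file.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;

public class StorageInfoStream {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    for (int i = 0; i < 3; i++) {
      StorageInfoProto.newBuilder()
          .setLayoutVersion(38).setNamespceID(i)
          .setClusterID("CID-" + i).setCTime(0L)
          .build()
          .writeDelimitedTo(out);   // writes a length-prefixed record
    }

    InputStream in = new ByteArrayInputStream(out.toByteArray());
    StorageInfoProto msg;
    while ((msg = StorageInfoProto.parseDelimitedFrom(in)) != null) {
      System.out.println("namespceID=" + msg.getNamespceID());
    }
  }
}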
(other.hasLayoutVersion()) { - setLayoutVersion(other.getLayoutVersion()); - } - if (other.hasNamespceID()) { - setNamespceID(other.getNamespceID()); - } - if (other.hasClusterID()) { - setClusterID(other.getClusterID()); - } - if (other.hasCTime()) { - setCTime(other.getCTime()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasLayoutVersion()) { - - return false; - } - if (!hasNamespceID()) { - - return false; - } - if (!hasClusterID()) { - - return false; - } - if (!hasCTime()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - layoutVersion_ = input.readUInt32(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - namespceID_ = input.readUInt32(); - break; - } - case 26: { - bitField0_ |= 0x00000004; - clusterID_ = input.readBytes(); - break; - } - case 32: { - bitField0_ |= 0x00000008; - cTime_ = input.readUInt64(); - break; - } - } - } - } - - private int bitField0_; - - // required uint32 layoutVersion = 1; - private int layoutVersion_ ; - public boolean hasLayoutVersion() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public int getLayoutVersion() { - return layoutVersion_; - } - public Builder setLayoutVersion(int value) { - bitField0_ |= 0x00000001; - layoutVersion_ = value; - onChanged(); - return this; - } - public Builder clearLayoutVersion() { - bitField0_ = (bitField0_ & ~0x00000001); - layoutVersion_ = 0; - onChanged(); - return this; - } - - // required uint32 namespceID = 2; - private int namespceID_ ; - public boolean hasNamespceID() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public int getNamespceID() { - return namespceID_; - } - public Builder setNamespceID(int value) { - bitField0_ |= 0x00000002; - namespceID_ = value; - onChanged(); - return this; - } - public Builder clearNamespceID() { - bitField0_ = (bitField0_ & ~0x00000002); - namespceID_ = 0; - onChanged(); - return this; - } - - // required string clusterID = 3; - private java.lang.Object clusterID_ = ""; - public boolean hasClusterID() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public String getClusterID() { - java.lang.Object ref = clusterID_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - clusterID_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setClusterID(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - clusterID_ = value; - onChanged(); - return this; - } - public Builder clearClusterID() { - bitField0_ = (bitField0_ & ~0x00000004); - clusterID_ = getDefaultInstance().getClusterID(); - onChanged(); - return this; - } - void setClusterID(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000004; - clusterID_ = value; - onChanged(); - } - - // required 
uint64 cTime = 4; - private long cTime_ ; - public boolean hasCTime() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public long getCTime() { - return cTime_; - } - public Builder setCTime(long value) { - bitField0_ |= 0x00000008; - cTime_ = value; - onChanged(); - return this; - } - public Builder clearCTime() { - bitField0_ = (bitField0_ & ~0x00000008); - cTime_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:StorageInfoProto) - } - - static { - defaultInstance = new StorageInfoProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:StorageInfoProto) - } - - public interface NamenodeRegistrationProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string rpcAddress = 1; - boolean hasRpcAddress(); - String getRpcAddress(); - - // required string httpAddress = 2; - boolean hasHttpAddress(); - String getHttpAddress(); - - // required .StorageInfoProto storageInfo = 3; - boolean hasStorageInfo(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder(); - - // optional .NamenodeRegistrationProto.NamenodeRoleProto role = 4; - boolean hasRole(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto getRole(); - } - public static final class NamenodeRegistrationProto extends - com.google.protobuf.GeneratedMessage - implements NamenodeRegistrationProtoOrBuilder { - // Use NamenodeRegistrationProto.newBuilder() to construct. - private NamenodeRegistrationProto(Builder builder) { - super(builder); - } - private NamenodeRegistrationProto(boolean noInit) {} - - private static final NamenodeRegistrationProto defaultInstance; - public static NamenodeRegistrationProto getDefaultInstance() { - return defaultInstance; - } - - public NamenodeRegistrationProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_NamenodeRegistrationProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_NamenodeRegistrationProto_fieldAccessorTable; - } - - public enum NamenodeRoleProto - implements com.google.protobuf.ProtocolMessageEnum { - NAMENODE(0, 1), - BACKUP(1, 2), - CHECKPOINT(2, 3), - ; - - public static final int NAMENODE_VALUE = 1; - public static final int BACKUP_VALUE = 2; - public static final int CHECKPOINT_VALUE = 3; - - - public final int getNumber() { return value; } - - public static NamenodeRoleProto valueOf(int value) { - switch (value) { - case 1: return NAMENODE; - case 2: return BACKUP; - case 3: return CHECKPOINT; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public NamenodeRoleProto findValueByNumber(int number) { - return NamenodeRoleProto.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - 
getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDescriptor().getEnumTypes().get(0); - } - - private static final NamenodeRoleProto[] VALUES = { - NAMENODE, BACKUP, CHECKPOINT, - }; - - public static NamenodeRoleProto valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private NamenodeRoleProto(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:NamenodeRegistrationProto.NamenodeRoleProto) - } - - private int bitField0_; - // required string rpcAddress = 1; - public static final int RPCADDRESS_FIELD_NUMBER = 1; - private java.lang.Object rpcAddress_; - public boolean hasRpcAddress() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getRpcAddress() { - java.lang.Object ref = rpcAddress_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - rpcAddress_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getRpcAddressBytes() { - java.lang.Object ref = rpcAddress_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - rpcAddress_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string httpAddress = 2; - public static final int HTTPADDRESS_FIELD_NUMBER = 2; - private java.lang.Object httpAddress_; - public boolean hasHttpAddress() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getHttpAddress() { - java.lang.Object ref = httpAddress_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - httpAddress_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getHttpAddressBytes() { - java.lang.Object ref = httpAddress_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - httpAddress_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required .StorageInfoProto storageInfo = 3; - public static final int STORAGEINFO_FIELD_NUMBER = 3; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_; - public boolean hasStorageInfo() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() { - return storageInfo_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() { - return storageInfo_; - } - - // optional .NamenodeRegistrationProto.NamenodeRoleProto role = 4; - public static final int ROLE_FIELD_NUMBER = 4; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto role_; - public boolean hasRole() { - return 
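// Illustrative sketch only (editor-added): the generated NamenodeRoleProto
// enum above maps between Java constants and wire numbers. valueOf(int)
// returns null for an unrecognized number, which is why the mergeFrom()
// parsing code later in this class falls back to recording the raw varint
// as an unknown field instead of failing.
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto;

public class RoleEnumDemo {
  public static void main(String[] args) {
    NamenodeRoleProto role = NamenodeRoleProto.BACKUP;
    int wire = role.getNumber();                        // 2 on the wire

    NamenodeRoleProto back = NamenodeRoleProto.valueOf(wire);
    System.out.println(back);                           // BACKUP

    System.out.println(NamenodeRoleProto.valueOf(99));  // null: unknown number
  }
}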
((bitField0_ & 0x00000008) == 0x00000008); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto getRole() { - return role_; - } - - private void initFields() { - rpcAddress_ = ""; - httpAddress_ = ""; - storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); - role_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto.NAMENODE; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasRpcAddress()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasHttpAddress()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasStorageInfo()) { - memoizedIsInitialized = 0; - return false; - } - if (!getStorageInfo().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getRpcAddressBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getHttpAddressBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeMessage(3, storageInfo_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeEnum(4, role_.getNumber()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getRpcAddressBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getHttpAddressBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, storageInfo_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(4, role_.getNumber()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto) obj; - - boolean result = true; - result = result && (hasRpcAddress() == other.hasRpcAddress()); - if (hasRpcAddress()) { - result = result && getRpcAddress() - .equals(other.getRpcAddress()); - } - result = result && (hasHttpAddress() == other.hasHttpAddress()); - if (hasHttpAddress()) { - result = result && getHttpAddress() - .equals(other.getHttpAddress()); - } - result = result && (hasStorageInfo() == other.hasStorageInfo()); - if (hasStorageInfo()) { - result = result && getStorageInfo() - 
.equals(other.getStorageInfo()); - } - result = result && (hasRole() == other.hasRole()); - if (hasRole()) { - result = result && - (getRole() == other.getRole()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasRpcAddress()) { - hash = (37 * hash) + RPCADDRESS_FIELD_NUMBER; - hash = (53 * hash) + getRpcAddress().hashCode(); - } - if (hasHttpAddress()) { - hash = (37 * hash) + HTTPADDRESS_FIELD_NUMBER; - hash = (53 * hash) + getHttpAddress().hashCode(); - } - if (hasStorageInfo()) { - hash = (37 * hash) + STORAGEINFO_FIELD_NUMBER; - hash = (53 * hash) + getStorageInfo().hashCode(); - } - if (hasRole()) { - hash = (37 * hash) + ROLE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getRole()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return 
newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_NamenodeRegistrationProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_NamenodeRegistrationProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getStorageInfoFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - rpcAddress_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - httpAddress_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - if (storageInfoBuilder_ == null) { - storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); - } else { - storageInfoBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000004); - role_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto.NAMENODE; - bitField0_ = (bitField0_ & ~0x00000008); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto buildParsed() - throws 
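// Illustrative sketch only (editor-added): generated messages are
// immutable, so the toBuilder()/newBuilder(prototype) methods shown above
// are the copy-and-modify path. The addresses below are made-up
// placeholders, not real cluster endpoints.
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;

public class RegistrationCopy {
  public static void main(String[] args) {
    StorageInfoProto storage = StorageInfoProto.newBuilder()
        .setLayoutVersion(38).setNamespceID(1)
        .setClusterID("CID-example").setCTime(0L).build();

    NamenodeRegistrationProto reg = NamenodeRegistrationProto.newBuilder()
        .setRpcAddress("nn-host:8020")     // hypothetical endpoint
        .setHttpAddress("nn-host:50070")   // hypothetical endpoint
        .setStorageInfo(storage)           // role is optional, left unset
        .build();

    // toBuilder() seeds a fresh Builder with every set field; later setter
    // calls never touch the original message.
    NamenodeRegistrationProto backup = reg.toBuilder()
        .setRole(NamenodeRegistrationProto.NamenodeRoleProto.BACKUP)
        .build();

    System.out.println(reg.hasRole());      // false
    System.out.println(backup.getRole());   // BACKUP
  }
}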
com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.rpcAddress_ = rpcAddress_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.httpAddress_ = httpAddress_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - if (storageInfoBuilder_ == null) { - result.storageInfo_ = storageInfo_; - } else { - result.storageInfo_ = storageInfoBuilder_.build(); - } - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.role_ = role_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance()) return this; - if (other.hasRpcAddress()) { - setRpcAddress(other.getRpcAddress()); - } - if (other.hasHttpAddress()) { - setHttpAddress(other.getHttpAddress()); - } - if (other.hasStorageInfo()) { - mergeStorageInfo(other.getStorageInfo()); - } - if (other.hasRole()) { - setRole(other.getRole()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasRpcAddress()) { - - return false; - } - if (!hasHttpAddress()) { - - return false; - } - if (!hasStorageInfo()) { - - return false; - } - if (!getStorageInfo().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - rpcAddress_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - httpAddress_ = input.readBytes(); - break; - } - case 26: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder(); - if (hasStorageInfo()) { - 
subBuilder.mergeFrom(getStorageInfo()); - } - input.readMessage(subBuilder, extensionRegistry); - setStorageInfo(subBuilder.buildPartial()); - break; - } - case 32: { - int rawValue = input.readEnum(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(4, rawValue); - } else { - bitField0_ |= 0x00000008; - role_ = value; - } - break; - } - } - } - } - - private int bitField0_; - - // required string rpcAddress = 1; - private java.lang.Object rpcAddress_ = ""; - public boolean hasRpcAddress() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getRpcAddress() { - java.lang.Object ref = rpcAddress_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - rpcAddress_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setRpcAddress(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - rpcAddress_ = value; - onChanged(); - return this; - } - public Builder clearRpcAddress() { - bitField0_ = (bitField0_ & ~0x00000001); - rpcAddress_ = getDefaultInstance().getRpcAddress(); - onChanged(); - return this; - } - void setRpcAddress(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - rpcAddress_ = value; - onChanged(); - } - - // required string httpAddress = 2; - private java.lang.Object httpAddress_ = ""; - public boolean hasHttpAddress() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getHttpAddress() { - java.lang.Object ref = httpAddress_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - httpAddress_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setHttpAddress(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - httpAddress_ = value; - onChanged(); - return this; - } - public Builder clearHttpAddress() { - bitField0_ = (bitField0_ & ~0x00000002); - httpAddress_ = getDefaultInstance().getHttpAddress(); - onChanged(); - return this; - } - void setHttpAddress(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000002; - httpAddress_ = value; - onChanged(); - } - - // required .StorageInfoProto storageInfo = 3; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> storageInfoBuilder_; - public boolean hasStorageInfo() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() { - if (storageInfoBuilder_ == null) { - return storageInfo_; - } else { - return storageInfoBuilder_.getMessage(); - } - } - public Builder setStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) { - if (storageInfoBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - storageInfo_ = value; - onChanged(); - } else { - 
storageInfoBuilder_.setMessage(value); - } - bitField0_ |= 0x00000004; - return this; - } - public Builder setStorageInfo( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder builderForValue) { - if (storageInfoBuilder_ == null) { - storageInfo_ = builderForValue.build(); - onChanged(); - } else { - storageInfoBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000004; - return this; - } - public Builder mergeStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) { - if (storageInfoBuilder_ == null) { - if (((bitField0_ & 0x00000004) == 0x00000004) && - storageInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance()) { - storageInfo_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder(storageInfo_).mergeFrom(value).buildPartial(); - } else { - storageInfo_ = value; - } - onChanged(); - } else { - storageInfoBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000004; - return this; - } - public Builder clearStorageInfo() { - if (storageInfoBuilder_ == null) { - storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); - onChanged(); - } else { - storageInfoBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder getStorageInfoBuilder() { - bitField0_ |= 0x00000004; - onChanged(); - return getStorageInfoFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() { - if (storageInfoBuilder_ != null) { - return storageInfoBuilder_.getMessageOrBuilder(); - } else { - return storageInfo_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> - getStorageInfoFieldBuilder() { - if (storageInfoBuilder_ == null) { - storageInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder>( - storageInfo_, - getParentForChildren(), - isClean()); - storageInfo_ = null; - } - return storageInfoBuilder_; - } - - // optional .NamenodeRegistrationProto.NamenodeRoleProto role = 4; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto role_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto.NAMENODE; - public boolean hasRole() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto getRole() { - return role_; - } - public Builder setRole(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000008; - role_ = value; - onChanged(); - return this; - } - public Builder clearRole() { - bitField0_ = (bitField0_ & ~0x00000008); - role_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto.NAMENODE; - onChanged(); - return this; - } - - // 
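// Illustrative sketch only (editor-added): for the nested storageInfo field,
// mergeStorageInfo() above does a field-wise merge into whatever
// StorageInfoProto the builder already holds, whereas setStorageInfo()
// replaces it wholesale.
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;

public class MergeNestedDemo {
  public static void main(String[] args) {
    NamenodeRegistrationProto.Builder b = NamenodeRegistrationProto.newBuilder()
        .setStorageInfo(StorageInfoProto.newBuilder()
            .setLayoutVersion(38).setNamespceID(1)
            .setClusterID("CID-a").setCTime(0L).build());

    // Only clusterID is set on the incoming partial message...
    b.mergeStorageInfo(StorageInfoProto.newBuilder()
        .setClusterID("CID-b").buildPartial());

    // ...so the other three fields survive the merge.
    StorageInfoProto merged = b.getStorageInfo();
    System.out.println(merged.getClusterID());    // CID-b
    System.out.println(merged.getNamespceID());   // 1
  }
}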
@@protoc_insertion_point(builder_scope:NamenodeRegistrationProto) - } - - static { - defaultInstance = new NamenodeRegistrationProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:NamenodeRegistrationProto) - } - - public interface CheckpointSignatureProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string blockPoolId = 1; - boolean hasBlockPoolId(); - String getBlockPoolId(); - - // required uint64 mostRecentCheckpointTxId = 2; - boolean hasMostRecentCheckpointTxId(); - long getMostRecentCheckpointTxId(); - - // required uint64 curSegmentTxId = 3; - boolean hasCurSegmentTxId(); - long getCurSegmentTxId(); - - // required .StorageInfoProto storageInfo = 4; - boolean hasStorageInfo(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder(); - } - public static final class CheckpointSignatureProto extends - com.google.protobuf.GeneratedMessage - implements CheckpointSignatureProtoOrBuilder { - // Use CheckpointSignatureProto.newBuilder() to construct. - private CheckpointSignatureProto(Builder builder) { - super(builder); - } - private CheckpointSignatureProto(boolean noInit) {} - - private static final CheckpointSignatureProto defaultInstance; - public static CheckpointSignatureProto getDefaultInstance() { - return defaultInstance; - } - - public CheckpointSignatureProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_CheckpointSignatureProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_CheckpointSignatureProto_fieldAccessorTable; - } - - private int bitField0_; - // required string blockPoolId = 1; - public static final int BLOCKPOOLID_FIELD_NUMBER = 1; - private java.lang.Object blockPoolId_; - public boolean hasBlockPoolId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getBlockPoolId() { - java.lang.Object ref = blockPoolId_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - blockPoolId_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getBlockPoolIdBytes() { - java.lang.Object ref = blockPoolId_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - blockPoolId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required uint64 mostRecentCheckpointTxId = 2; - public static final int MOSTRECENTCHECKPOINTTXID_FIELD_NUMBER = 2; - private long mostRecentCheckpointTxId_; - public boolean hasMostRecentCheckpointTxId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getMostRecentCheckpointTxId() { - return mostRecentCheckpointTxId_; - } - - // required uint64 curSegmentTxId = 3; - public static final int CURSEGMENTTXID_FIELD_NUMBER = 3; - private long curSegmentTxId_; - public boolean hasCurSegmentTxId() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getCurSegmentTxId() { - return 
curSegmentTxId_; - } - - // required .StorageInfoProto storageInfo = 4; - public static final int STORAGEINFO_FIELD_NUMBER = 4; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_; - public boolean hasStorageInfo() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() { - return storageInfo_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() { - return storageInfo_; - } - - private void initFields() { - blockPoolId_ = ""; - mostRecentCheckpointTxId_ = 0L; - curSegmentTxId_ = 0L; - storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBlockPoolId()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasMostRecentCheckpointTxId()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasCurSegmentTxId()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasStorageInfo()) { - memoizedIsInitialized = 0; - return false; - } - if (!getStorageInfo().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getBlockPoolIdBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, mostRecentCheckpointTxId_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt64(3, curSegmentTxId_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeMessage(4, storageInfo_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getBlockPoolIdBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, mostRecentCheckpointTxId_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(3, curSegmentTxId_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, storageInfo_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto) obj; - - boolean result = true; - result = result && (hasBlockPoolId() == 
other.hasBlockPoolId()); - if (hasBlockPoolId()) { - result = result && getBlockPoolId() - .equals(other.getBlockPoolId()); - } - result = result && (hasMostRecentCheckpointTxId() == other.hasMostRecentCheckpointTxId()); - if (hasMostRecentCheckpointTxId()) { - result = result && (getMostRecentCheckpointTxId() - == other.getMostRecentCheckpointTxId()); - } - result = result && (hasCurSegmentTxId() == other.hasCurSegmentTxId()); - if (hasCurSegmentTxId()) { - result = result && (getCurSegmentTxId() - == other.getCurSegmentTxId()); - } - result = result && (hasStorageInfo() == other.hasStorageInfo()); - if (hasStorageInfo()) { - result = result && getStorageInfo() - .equals(other.getStorageInfo()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBlockPoolId()) { - hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER; - hash = (53 * hash) + getBlockPoolId().hashCode(); - } - if (hasMostRecentCheckpointTxId()) { - hash = (37 * hash) + MOSTRECENTCHECKPOINTTXID_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getMostRecentCheckpointTxId()); - } - if (hasCurSegmentTxId()) { - hash = (37 * hash) + CURSEGMENTTXID_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getCurSegmentTxId()); - } - if (hasStorageInfo()) { - hash = (37 * hash) + STORAGEINFO_FIELD_NUMBER; - hash = (53 * hash) + getStorageInfo().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; 
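// Illustrative sketch only (editor-added): the generated equals()/hashCode()
// pair above compares CheckpointSignatureProto field by field, so two
// independently built messages with the same contents behave as one key in
// a hash map. The block pool and cluster ids are placeholders.
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;

public class SignatureEquality {
  static CheckpointSignatureProto sig(long ckptTxId) {
    return CheckpointSignatureProto.newBuilder()
        .setBlockPoolId("BP-example")
        .setMostRecentCheckpointTxId(ckptTxId)
        .setCurSegmentTxId(ckptTxId + 1)
        .setStorageInfo(StorageInfoProto.newBuilder()
            .setLayoutVersion(38).setNamespceID(1)
            .setClusterID("CID-example").setCTime(0L).build())
        .build();
  }

  public static void main(String[] args) {
    Map<CheckpointSignatureProto, String> seen =
        new HashMap<CheckpointSignatureProto, String>();
    seen.put(sig(100L), "first");
    seen.put(sig(100L), "second");          // value-equal key: overwrites
    System.out.println(seen.size());        // 1
    System.out.println(seen.get(sig(100L)));// second
  }
}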
- } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_CheckpointSignatureProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_CheckpointSignatureProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getStorageInfoFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - blockPoolId_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - mostRecentCheckpointTxId_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - curSegmentTxId_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - if (storageInfoBuilder_ == null) { - storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); - } else { - storageInfoBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000008); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto getDefaultInstanceForType() { - return 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.blockPoolId_ = blockPoolId_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.mostRecentCheckpointTxId_ = mostRecentCheckpointTxId_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.curSegmentTxId_ = curSegmentTxId_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - if (storageInfoBuilder_ == null) { - result.storageInfo_ = storageInfo_; - } else { - result.storageInfo_ = storageInfoBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance()) return this; - if (other.hasBlockPoolId()) { - setBlockPoolId(other.getBlockPoolId()); - } - if (other.hasMostRecentCheckpointTxId()) { - setMostRecentCheckpointTxId(other.getMostRecentCheckpointTxId()); - } - if (other.hasCurSegmentTxId()) { - setCurSegmentTxId(other.getCurSegmentTxId()); - } - if (other.hasStorageInfo()) { - mergeStorageInfo(other.getStorageInfo()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBlockPoolId()) { - - return false; - } - if (!hasMostRecentCheckpointTxId()) { - - return false; - } - if (!hasCurSegmentTxId()) { - - return false; - } - if (!hasStorageInfo()) { - - return false; - } - if (!getStorageInfo().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = 
input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - blockPoolId_ = input.readBytes(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - mostRecentCheckpointTxId_ = input.readUInt64(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - curSegmentTxId_ = input.readUInt64(); - break; - } - case 34: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder(); - if (hasStorageInfo()) { - subBuilder.mergeFrom(getStorageInfo()); - } - input.readMessage(subBuilder, extensionRegistry); - setStorageInfo(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required string blockPoolId = 1; - private java.lang.Object blockPoolId_ = ""; - public boolean hasBlockPoolId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getBlockPoolId() { - java.lang.Object ref = blockPoolId_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - blockPoolId_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setBlockPoolId(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - blockPoolId_ = value; - onChanged(); - return this; - } - public Builder clearBlockPoolId() { - bitField0_ = (bitField0_ & ~0x00000001); - blockPoolId_ = getDefaultInstance().getBlockPoolId(); - onChanged(); - return this; - } - void setBlockPoolId(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - blockPoolId_ = value; - onChanged(); - } - - // required uint64 mostRecentCheckpointTxId = 2; - private long mostRecentCheckpointTxId_ ; - public boolean hasMostRecentCheckpointTxId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getMostRecentCheckpointTxId() { - return mostRecentCheckpointTxId_; - } - public Builder setMostRecentCheckpointTxId(long value) { - bitField0_ |= 0x00000002; - mostRecentCheckpointTxId_ = value; - onChanged(); - return this; - } - public Builder clearMostRecentCheckpointTxId() { - bitField0_ = (bitField0_ & ~0x00000002); - mostRecentCheckpointTxId_ = 0L; - onChanged(); - return this; - } - - // required uint64 curSegmentTxId = 3; - private long curSegmentTxId_ ; - public boolean hasCurSegmentTxId() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getCurSegmentTxId() { - return curSegmentTxId_; - } - public Builder setCurSegmentTxId(long value) { - bitField0_ |= 0x00000004; - curSegmentTxId_ = value; - onChanged(); - return this; - } - public Builder clearCurSegmentTxId() { - bitField0_ = (bitField0_ & ~0x00000004); - curSegmentTxId_ = 0L; - onChanged(); - return this; - } - - // required .StorageInfoProto storageInfo = 4; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> 
storageInfoBuilder_; - public boolean hasStorageInfo() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() { - if (storageInfoBuilder_ == null) { - return storageInfo_; - } else { - return storageInfoBuilder_.getMessage(); - } - } - public Builder setStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) { - if (storageInfoBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - storageInfo_ = value; - onChanged(); - } else { - storageInfoBuilder_.setMessage(value); - } - bitField0_ |= 0x00000008; - return this; - } - public Builder setStorageInfo( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder builderForValue) { - if (storageInfoBuilder_ == null) { - storageInfo_ = builderForValue.build(); - onChanged(); - } else { - storageInfoBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000008; - return this; - } - public Builder mergeStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) { - if (storageInfoBuilder_ == null) { - if (((bitField0_ & 0x00000008) == 0x00000008) && - storageInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance()) { - storageInfo_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder(storageInfo_).mergeFrom(value).buildPartial(); - } else { - storageInfo_ = value; - } - onChanged(); - } else { - storageInfoBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000008; - return this; - } - public Builder clearStorageInfo() { - if (storageInfoBuilder_ == null) { - storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); - onChanged(); - } else { - storageInfoBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000008); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder getStorageInfoBuilder() { - bitField0_ |= 0x00000008; - onChanged(); - return getStorageInfoFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() { - if (storageInfoBuilder_ != null) { - return storageInfoBuilder_.getMessageOrBuilder(); - } else { - return storageInfo_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> - getStorageInfoFieldBuilder() { - if (storageInfoBuilder_ == null) { - storageInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder>( - storageInfo_, - getParentForChildren(), - isClean()); - storageInfo_ = null; - } - return storageInfoBuilder_; - } - - // @@protoc_insertion_point(builder_scope:CheckpointSignatureProto) - } - - static { - defaultInstance = new CheckpointSignatureProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:CheckpointSignatureProto) - } - - public interface NamenodeCommandProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required uint32 action = 1; - boolean hasAction(); - int 
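The message this boilerplate serializes can be read straight back out of the field comments
in the deleted code. A reconstruction in proto2 syntax follows; field names, types and
numbers are taken verbatim from those comments, while the placement in hdfs.proto next to a
StorageInfoProto message is an assumption based on the generated outer class name HdfsProtos.

    // Reconstructed sketch, not the authoritative source file.
    message CheckpointSignatureProto {
      required string blockPoolId = 1;
      required uint64 mostRecentCheckpointTxId = 2;
      required uint64 curSegmentTxId = 3;
      // StorageInfoProto is assumed to be declared elsewhere in the same file.
      required StorageInfoProto storageInfo = 4;
    }

All four fields are required, which is why the generated isInitialized() rejects a message
missing any of them and additionally recurses into storageInfo.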
  [elided: the deleted, generated NamenodeCommandProto: the NamenodeCommandProtoOrBuilder
   interface; the Type enum (NamenodeCommand = 0, CheckPointCommand = 1) with its
   EnumLiteMap and EnumValueDescriptor plumbing; accessors for action (uint32, field 1),
   type (enum, field 2) and the optional checkpointCmd (CheckpointCommandProto message,
   field 3); initFields(), isInitialized(), writeTo(), getSerializedSize(), equals() and
   hashCode(); the stock parseFrom and parseDelimitedFrom overloads; and the nested Builder,
   whose mergeFrom(CodedInputStream) dispatches on wire tags 8 (action), 16 (type, routing
   unrecognized enum values into the unknown-field set via mergeVarintField) and 26
   (checkpointCmd), backed by a SingleFieldBuilder for checkpointCmd]
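Read back the same way from the Type enum and field comments in the deleted code (proto2
syntax; placement in hdfs.proto is assumed):

    // Reconstructed sketch, not the authoritative source file.
    message NamenodeCommandProto {
      enum Type {
        NamenodeCommand = 0;
        CheckPointCommand = 1;
      }
      required uint32 action = 1;
      required Type type = 2;
      // The lone optional field; the generated parser fills it on wire tag 26,
      // presumably only when type is CheckPointCommand.
      optional CheckpointCommandProto checkpointCmd = 3;
    }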
  [elided: the deleted, generated CheckpointCommandProto: the CheckpointCommandProtoOrBuilder
   interface; accessors for signature (CheckpointSignatureProto message, field 1) and
   needToReturnImage (bool, field 2); initFields(), isInitialized() (both fields are
   required, and signature must itself be initialized), writeTo(), getSerializedSize(),
   equals() and hashCode(); the stock parseFrom and parseDelimitedFrom overloads; and the
   nested Builder, whose mergeFrom(CodedInputStream) dispatches on wire tags 10 (signature)
   and 16 (needToReturnImage), backed by a SingleFieldBuilder for signature]
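The corresponding definition, reconstructed from the deleted code (proto2 syntax; placement
in hdfs.proto is assumed):

    // Reconstructed sketch, not the authoritative source file.
    message CheckpointCommandProto {
      required CheckpointSignatureProto signature = 1;
      // Name verbatim from the generated code; presumably flags whether the
      // merged checkpoint image has to be sent back.
      required bool needToReturnImage = 2;
    }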
  [elided: the tail of the CheckpointCommandProto Builder (the setter and clear methods for
   needToReturnImage) followed by the deleted, generated BlockProto: the BlockProtoOrBuilder
   interface; accessors for blockId (uint64, field 1), genStamp (uint64, field 2) and the
   optional numBytes (uint64, field 3); initFields(), isInitialized() (only blockId and
   genStamp are required), writeTo(), getSerializedSize(), equals(), hashCode() and the
   first of the stock parseFrom overloads]
com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - blockId_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - genStamp_ = 0L; - bitField0_ = (bitField0_ & 
~0x00000002); - numBytes_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.blockId_ = blockId_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.genStamp_ = genStamp_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.numBytes_ = numBytes_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) return this; - if (other.hasBlockId()) { - setBlockId(other.getBlockId()); - } - if (other.hasGenStamp()) { - setGenStamp(other.getGenStamp()); - } - if (other.hasNumBytes()) { - setNumBytes(other.getNumBytes()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBlockId()) { - - return false; - } - if (!hasGenStamp()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - 
blockId_ = input.readUInt64(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - genStamp_ = input.readUInt64(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - numBytes_ = input.readUInt64(); - break; - } - } - } - } - - private int bitField0_; - - // required uint64 blockId = 1; - private long blockId_ ; - public boolean hasBlockId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getBlockId() { - return blockId_; - } - public Builder setBlockId(long value) { - bitField0_ |= 0x00000001; - blockId_ = value; - onChanged(); - return this; - } - public Builder clearBlockId() { - bitField0_ = (bitField0_ & ~0x00000001); - blockId_ = 0L; - onChanged(); - return this; - } - - // required uint64 genStamp = 2; - private long genStamp_ ; - public boolean hasGenStamp() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getGenStamp() { - return genStamp_; - } - public Builder setGenStamp(long value) { - bitField0_ |= 0x00000002; - genStamp_ = value; - onChanged(); - return this; - } - public Builder clearGenStamp() { - bitField0_ = (bitField0_ & ~0x00000002); - genStamp_ = 0L; - onChanged(); - return this; - } - - // optional uint64 numBytes = 3; - private long numBytes_ ; - public boolean hasNumBytes() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getNumBytes() { - return numBytes_; - } - public Builder setNumBytes(long value) { - bitField0_ |= 0x00000004; - numBytes_ = value; - onChanged(); - return this; - } - public Builder clearNumBytes() { - bitField0_ = (bitField0_ & ~0x00000004); - numBytes_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:BlockProto) - } - - static { - defaultInstance = new BlockProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:BlockProto) - } - - public interface BlockWithLocationsProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .BlockProto block = 1; - boolean hasBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder(); - - // repeated string datanodeIDs = 2; - java.util.List getDatanodeIDsList(); - int getDatanodeIDsCount(); - String getDatanodeIDs(int index); - } - public static final class BlockWithLocationsProto extends - com.google.protobuf.GeneratedMessage - implements BlockWithLocationsProtoOrBuilder { - // Use BlockWithLocationsProto.newBuilder() to construct. 
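The field comments in the deleted class above pin down the schema exactly: BlockProto is `required uint64 blockId = 1`, `required uint64 genStamp = 2`, `optional uint64 numBytes = 3`. As a reference point for readers of this hunk, a minimal round-trip sketch against the generated API — every class and method name below appears in the deleted code; the concrete values are purely illustrative:

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;

public class BlockProtoSketch {
  public static void main(String[] args) throws Exception {
    // build() throws UninitializedMessageException unless both
    // required fields (blockId, genStamp) are set; numBytes is optional.
    BlockProto block = BlockProto.newBuilder()
        .setBlockId(42L)           // illustrative value
        .setGenStamp(1001L)        // illustrative value
        .setNumBytes(134217728L)   // illustrative value (128 MB)
        .build();

    // Wire round-trip via the parseFrom(byte[]) overload shown above.
    BlockProto parsed = BlockProto.parseFrom(block.toByteArray());
    assert parsed.getBlockId() == 42L && parsed.hasNumBytes();
  }
}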
- private BlockWithLocationsProto(Builder builder) { - super(builder); - } - private BlockWithLocationsProto(boolean noInit) {} - - private static final BlockWithLocationsProto defaultInstance; - public static BlockWithLocationsProto getDefaultInstance() { - return defaultInstance; - } - - public BlockWithLocationsProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockWithLocationsProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockWithLocationsProto_fieldAccessorTable; - } - - private int bitField0_; - // required .BlockProto block = 1; - public static final int BLOCK_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock() { - return block_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder() { - return block_; - } - - // repeated string datanodeIDs = 2; - public static final int DATANODEIDS_FIELD_NUMBER = 2; - private com.google.protobuf.LazyStringList datanodeIDs_; - public java.util.List - getDatanodeIDsList() { - return datanodeIDs_; - } - public int getDatanodeIDsCount() { - return datanodeIDs_.size(); - } - public String getDatanodeIDs(int index) { - return datanodeIDs_.get(index); - } - - private void initFields() { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); - datanodeIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBlock()) { - memoizedIsInitialized = 0; - return false; - } - if (!getBlock().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, block_); - } - for (int i = 0; i < datanodeIDs_.size(); i++) { - output.writeBytes(2, datanodeIDs_.getByteString(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, block_); - } - { - int dataSize = 0; - for (int i = 0; i < datanodeIDs_.size(); i++) { - dataSize += com.google.protobuf.CodedOutputStream - .computeBytesSizeNoTag(datanodeIDs_.getByteString(i)); - } - size += dataSize; - size += 1 * getDatanodeIDsList().size(); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final 
java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto) obj; - - boolean result = true; - result = result && (hasBlock() == other.hasBlock()); - if (hasBlock()) { - result = result && getBlock() - .equals(other.getBlock()); - } - result = result && getDatanodeIDsList() - .equals(other.getDatanodeIDsList()); - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBlock()) { - hash = (37 * hash) + BLOCK_FIELD_NUMBER; - hash = (53 * hash) + getBlock().hashCode(); - } - if (getDatanodeIDsCount() > 0) { - hash = (37 * hash) + DATANODEIDS_FIELD_NUMBER; - hash = (53 * hash) + getDatanodeIDsList().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockWithLocationsProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockWithLocationsProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBlockFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - datanodeIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (blockBuilder_ == null) { - result.block_ = block_; - } else { - result.block_ = blockBuilder_.build(); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - datanodeIDs_ = new com.google.protobuf.UnmodifiableLazyStringList( - datanodeIDs_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.datanodeIDs_ = datanodeIDs_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.getDefaultInstance()) return this; - if (other.hasBlock()) { - mergeBlock(other.getBlock()); - } - if (!other.datanodeIDs_.isEmpty()) { - if (datanodeIDs_.isEmpty()) { - datanodeIDs_ = other.datanodeIDs_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureDatanodeIDsIsMutable(); - datanodeIDs_.addAll(other.datanodeIDs_); - } - onChanged(); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBlock()) { - - return false; - } - if (!getBlock().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(); - if (hasBlock()) { - subBuilder.mergeFrom(getBlock()); - } - input.readMessage(subBuilder, extensionRegistry); - setBlock(subBuilder.buildPartial()); - break; - } - case 18: { - ensureDatanodeIDsIsMutable(); - datanodeIDs_.add(input.readBytes()); - break; - } - } - } - } - - private int bitField0_; - - // required .BlockProto block = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< 
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> blockBuilder_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock() { - if (blockBuilder_ == null) { - return block_; - } else { - return blockBuilder_.getMessage(); - } - } - public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) { - if (blockBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - block_ = value; - onChanged(); - } else { - blockBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setBlock( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) { - if (blockBuilder_ == null) { - block_ = builderForValue.build(); - onChanged(); - } else { - blockBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) { - if (blockBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) { - block_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); - } else { - block_ = value; - } - onChanged(); - } else { - blockBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearBlock() { - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); - onChanged(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getBlockBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getBlockFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder() { - if (blockBuilder_ != null) { - return blockBuilder_.getMessageOrBuilder(); - } else { - return block_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> - getBlockFieldBuilder() { - if (blockBuilder_ == null) { - blockBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>( - block_, - getParentForChildren(), - isClean()); - block_ = null; - } - return blockBuilder_; - } - - // repeated string datanodeIDs = 2; - private com.google.protobuf.LazyStringList datanodeIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY; - private void ensureDatanodeIDsIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { - datanodeIDs_ = new com.google.protobuf.LazyStringArrayList(datanodeIDs_); - bitField0_ |= 0x00000002; - } - } - public java.util.List - getDatanodeIDsList() { - return java.util.Collections.unmodifiableList(datanodeIDs_); - } - public int getDatanodeIDsCount() { - return datanodeIDs_.size(); 
- } - public String getDatanodeIDs(int index) { - return datanodeIDs_.get(index); - } - public Builder setDatanodeIDs( - int index, String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureDatanodeIDsIsMutable(); - datanodeIDs_.set(index, value); - onChanged(); - return this; - } - public Builder addDatanodeIDs(String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureDatanodeIDsIsMutable(); - datanodeIDs_.add(value); - onChanged(); - return this; - } - public Builder addAllDatanodeIDs( - java.lang.Iterable values) { - ensureDatanodeIDsIsMutable(); - super.addAll(values, datanodeIDs_); - onChanged(); - return this; - } - public Builder clearDatanodeIDs() { - datanodeIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - return this; - } - void addDatanodeIDs(com.google.protobuf.ByteString value) { - ensureDatanodeIDsIsMutable(); - datanodeIDs_.add(value); - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:BlockWithLocationsProto) - } - - static { - defaultInstance = new BlockWithLocationsProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:BlockWithLocationsProto) - } - - public interface BlocksWithLocationsProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // repeated .BlockWithLocationsProto blocks = 1; - java.util.List - getBlocksList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto getBlocks(int index); - int getBlocksCount(); - java.util.List - getBlocksOrBuilderList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder getBlocksOrBuilder( - int index); - } - public static final class BlocksWithLocationsProto extends - com.google.protobuf.GeneratedMessage - implements BlocksWithLocationsProtoOrBuilder { - // Use BlocksWithLocationsProto.newBuilder() to construct. 
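Likewise, BlockWithLocationsProto reads back from its deleted accessors as `required .BlockProto block = 1` plus `repeated string datanodeIDs = 2`. A short sketch of populating it through the generated builder (names from the deleted code, values illustrative):

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;

public class BlockWithLocationsSketch {
  public static void main(String[] args) {
    BlockWithLocationsProto bwl = BlockWithLocationsProto.newBuilder()
        // setBlock(...) also has a Builder-accepting overload, used here.
        .setBlock(BlockProto.newBuilder()
            .setBlockId(42L)        // illustrative
            .setGenStamp(1001L))    // illustrative
        .addDatanodeIDs("dn-1:50010")  // illustrative datanode IDs
        .addDatanodeIDs("dn-2:50010")
        .build();
    // isInitialized() requires block to be present and itself initialized.
    assert bwl.getDatanodeIDsCount() == 2;
  }
}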
- private BlocksWithLocationsProto(Builder builder) { - super(builder); - } - private BlocksWithLocationsProto(boolean noInit) {} - - private static final BlocksWithLocationsProto defaultInstance; - public static BlocksWithLocationsProto getDefaultInstance() { - return defaultInstance; - } - - public BlocksWithLocationsProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlocksWithLocationsProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlocksWithLocationsProto_fieldAccessorTable; - } - - // repeated .BlockWithLocationsProto blocks = 1; - public static final int BLOCKS_FIELD_NUMBER = 1; - private java.util.List blocks_; - public java.util.List getBlocksList() { - return blocks_; - } - public java.util.List - getBlocksOrBuilderList() { - return blocks_; - } - public int getBlocksCount() { - return blocks_.size(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto getBlocks(int index) { - return blocks_.get(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder getBlocksOrBuilder( - int index) { - return blocks_.get(index); - } - - private void initFields() { - blocks_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - for (int i = 0; i < getBlocksCount(); i++) { - if (!getBlocks(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < blocks_.size(); i++) { - output.writeMessage(1, blocks_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - for (int i = 0; i < blocks_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, blocks_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto) obj; - - boolean result = true; - result = result && getBlocksList() - .equals(other.getBlocksList()); - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getBlocksCount() > 0) { - hash = (37 * hash) + 
BLOCKS_FIELD_NUMBER; - hash = (53 * hash) + getBlocksList().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlocksWithLocationsProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlocksWithLocationsProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBlocksFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (blocksBuilder_ == null) { - blocks_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - } else { - blocksBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto(this); - int from_bitField0_ = bitField0_; - if (blocksBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - blocks_ = java.util.Collections.unmodifiableList(blocks_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.blocks_ = blocks_; - } else { - result.blocks_ = blocksBuilder_.build(); - } - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto)other); - } 
else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.getDefaultInstance()) return this; - if (blocksBuilder_ == null) { - if (!other.blocks_.isEmpty()) { - if (blocks_.isEmpty()) { - blocks_ = other.blocks_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureBlocksIsMutable(); - blocks_.addAll(other.blocks_); - } - onChanged(); - } - } else { - if (!other.blocks_.isEmpty()) { - if (blocksBuilder_.isEmpty()) { - blocksBuilder_.dispose(); - blocksBuilder_ = null; - blocks_ = other.blocks_; - bitField0_ = (bitField0_ & ~0x00000001); - blocksBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getBlocksFieldBuilder() : null; - } else { - blocksBuilder_.addAllMessages(other.blocks_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - for (int i = 0; i < getBlocksCount(); i++) { - if (!getBlocks(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addBlocks(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // repeated .BlockWithLocationsProto blocks = 1; - private java.util.List blocks_ = - java.util.Collections.emptyList(); - private void ensureBlocksIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - blocks_ = new java.util.ArrayList(blocks_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder> blocksBuilder_; - - public java.util.List getBlocksList() { - if (blocksBuilder_ == null) { - return java.util.Collections.unmodifiableList(blocks_); - } else { - return blocksBuilder_.getMessageList(); - } - } - public int getBlocksCount() { - if (blocksBuilder_ == null) { - return blocks_.size(); - } else { - return blocksBuilder_.getCount(); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto getBlocks(int index) { - if (blocksBuilder_ == null) { - return blocks_.get(index); - } else { - return blocksBuilder_.getMessage(index); - } - } - public Builder setBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto value) { - if (blocksBuilder_ == null) { - if (value == null) { - throw new 
NullPointerException(); - } - ensureBlocksIsMutable(); - blocks_.set(index, value); - onChanged(); - } else { - blocksBuilder_.setMessage(index, value); - } - return this; - } - public Builder setBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder builderForValue) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.set(index, builderForValue.build()); - onChanged(); - } else { - blocksBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto value) { - if (blocksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureBlocksIsMutable(); - blocks_.add(value); - onChanged(); - } else { - blocksBuilder_.addMessage(value); - } - return this; - } - public Builder addBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto value) { - if (blocksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureBlocksIsMutable(); - blocks_.add(index, value); - onChanged(); - } else { - blocksBuilder_.addMessage(index, value); - } - return this; - } - public Builder addBlocks( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder builderForValue) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.add(builderForValue.build()); - onChanged(); - } else { - blocksBuilder_.addMessage(builderForValue.build()); - } - return this; - } - public Builder addBlocks( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder builderForValue) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.add(index, builderForValue.build()); - onChanged(); - } else { - blocksBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - public Builder addAllBlocks( - java.lang.Iterable values) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - super.addAll(values, blocks_); - onChanged(); - } else { - blocksBuilder_.addAllMessages(values); - } - return this; - } - public Builder clearBlocks() { - if (blocksBuilder_ == null) { - blocks_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - blocksBuilder_.clear(); - } - return this; - } - public Builder removeBlocks(int index) { - if (blocksBuilder_ == null) { - ensureBlocksIsMutable(); - blocks_.remove(index); - onChanged(); - } else { - blocksBuilder_.remove(index); - } - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder getBlocksBuilder( - int index) { - return getBlocksFieldBuilder().getBuilder(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder getBlocksOrBuilder( - int index) { - if (blocksBuilder_ == null) { - return blocks_.get(index); } else { - return blocksBuilder_.getMessageOrBuilder(index); - } - } - public java.util.List - getBlocksOrBuilderList() { - if (blocksBuilder_ != null) { - return blocksBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(blocks_); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder addBlocksBuilder() { - return getBlocksFieldBuilder().addBuilder( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.getDefaultInstance()); - } - public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder addBlocksBuilder( - int index) { - return getBlocksFieldBuilder().addBuilder( - index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.getDefaultInstance()); - } - public java.util.List - getBlocksBuilderList() { - return getBlocksFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder> - getBlocksFieldBuilder() { - if (blocksBuilder_ == null) { - blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder>( - blocks_, - ((bitField0_ & 0x00000001) == 0x00000001), - getParentForChildren(), - isClean()); - blocks_ = null; - } - return blocksBuilder_; - } - - // @@protoc_insertion_point(builder_scope:BlocksWithLocationsProto) - } - - static { - defaultInstance = new BlocksWithLocationsProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:BlocksWithLocationsProto) - } - - public interface RemoteEditLogProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required uint64 startTxId = 1; - boolean hasStartTxId(); - long getStartTxId(); - - // required uint64 endTxId = 2; - boolean hasEndTxId(); - long getEndTxId(); - } - public static final class RemoteEditLogProto extends - com.google.protobuf.GeneratedMessage - implements RemoteEditLogProtoOrBuilder { - // Use RemoteEditLogProto.newBuilder() to construct. 
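BlocksWithLocationsProto is the plural wrapper, `repeated .BlockWithLocationsProto blocks = 1`, managed through the RepeatedFieldBuilder machinery deleted above. A sketch using the nested-builder path (addBlocksBuilder() appears in the deleted code; values illustrative):

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;

public class BlocksWithLocationsSketch {
  public static void main(String[] args) {
    BlocksWithLocationsProto.Builder b = BlocksWithLocationsProto.newBuilder();
    // addBlocksBuilder() returns a nested builder already wired into the
    // parent, so mutations here land in the repeated `blocks` field.
    b.addBlocksBuilder()
        .setBlock(BlockProto.newBuilder().setBlockId(1L).setGenStamp(1L))
        .addDatanodeIDs("dn-1:50010");  // illustrative
    BlocksWithLocationsProto blocks = b.build();
    assert blocks.getBlocksCount() == 1;
  }
}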
- private RemoteEditLogProto(Builder builder) { - super(builder); - } - private RemoteEditLogProto(boolean noInit) {} - - private static final RemoteEditLogProto defaultInstance; - public static RemoteEditLogProto getDefaultInstance() { - return defaultInstance; - } - - public RemoteEditLogProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_RemoteEditLogProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_RemoteEditLogProto_fieldAccessorTable; - } - - private int bitField0_; - // required uint64 startTxId = 1; - public static final int STARTTXID_FIELD_NUMBER = 1; - private long startTxId_; - public boolean hasStartTxId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getStartTxId() { - return startTxId_; - } - - // required uint64 endTxId = 2; - public static final int ENDTXID_FIELD_NUMBER = 2; - private long endTxId_; - public boolean hasEndTxId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getEndTxId() { - return endTxId_; - } - - private void initFields() { - startTxId_ = 0L; - endTxId_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasStartTxId()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasEndTxId()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt64(1, startTxId_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, endTxId_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(1, startTxId_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, endTxId_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto) obj; - - boolean result = true; - result = result && (hasStartTxId() == other.hasStartTxId()); - if (hasStartTxId()) { - result = result && (getStartTxId() - == other.getStartTxId()); - } - result = result && (hasEndTxId() == other.hasEndTxId()); - if (hasEndTxId()) { - result = result && (getEndTxId() 
- == other.getEndTxId()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasStartTxId()) { - hash = (37 * hash) + STARTTXID_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getStartTxId()); - } - if (hasEndTxId()) { - hash = (37 * hash) + ENDTXID_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getEndTxId()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { 
return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_RemoteEditLogProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_RemoteEditLogProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - startTxId_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - endTxId_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.startTxId_ = startTxId_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.endTxId_ = endTxId_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other 
instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.getDefaultInstance()) return this; - if (other.hasStartTxId()) { - setStartTxId(other.getStartTxId()); - } - if (other.hasEndTxId()) { - setEndTxId(other.getEndTxId()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasStartTxId()) { - - return false; - } - if (!hasEndTxId()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - startTxId_ = input.readUInt64(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - endTxId_ = input.readUInt64(); - break; - } - } - } - } - - private int bitField0_; - - // required uint64 startTxId = 1; - private long startTxId_ ; - public boolean hasStartTxId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getStartTxId() { - return startTxId_; - } - public Builder setStartTxId(long value) { - bitField0_ |= 0x00000001; - startTxId_ = value; - onChanged(); - return this; - } - public Builder clearStartTxId() { - bitField0_ = (bitField0_ & ~0x00000001); - startTxId_ = 0L; - onChanged(); - return this; - } - - // required uint64 endTxId = 2; - private long endTxId_ ; - public boolean hasEndTxId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getEndTxId() { - return endTxId_; - } - public Builder setEndTxId(long value) { - bitField0_ |= 0x00000002; - endTxId_ = value; - onChanged(); - return this; - } - public Builder clearEndTxId() { - bitField0_ = (bitField0_ & ~0x00000002); - endTxId_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:RemoteEditLogProto) - } - - static { - defaultInstance = new RemoteEditLogProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:RemoteEditLogProto) - } - - public interface RemoteEditLogManifestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // repeated .RemoteEditLogProto logs = 1; - java.util.List - getLogsList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto getLogs(int index); - int getLogsCount(); - java.util.List - getLogsOrBuilderList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProtoOrBuilder getLogsOrBuilder( - int index); - } - public static final class RemoteEditLogManifestProto extends - com.google.protobuf.GeneratedMessage - implements RemoteEditLogManifestProtoOrBuilder { - // Use RemoteEditLogManifestProto.newBuilder() to construct. 
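[Editor's note, not part of the patch: the RemoteEditLogProto message whose generated code is deleted above is a plain two-field record, required uint64 startTxId = 1 and required uint64 endTxId = 2, per the field comments in the generated source. A minimal sketch of the builder round-trip, assuming the pre-deletion generated classes are on the classpath; all methods used here appear in the hunk except toByteArray(), which is the standard protobuf MessageLite serializer:

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto;

// Build a descriptor for an edit-log segment covering transactions 1..100.
RemoteEditLogProto log = RemoteEditLogProto.newBuilder()
    .setStartTxId(1L)
    .setEndTxId(100L)
    .build();   // build() throws UninitializedMessageException if a required field is unset

// Serialize and parse back; both required fields survive the round-trip.
byte[] bytes = log.toByteArray();
RemoteEditLogProto parsed = RemoteEditLogProto.parseFrom(bytes);
assert parsed.getStartTxId() == 1L && parsed.getEndTxId() == 100L;
]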
- private RemoteEditLogManifestProto(Builder builder) { - super(builder); - } - private RemoteEditLogManifestProto(boolean noInit) {} - - private static final RemoteEditLogManifestProto defaultInstance; - public static RemoteEditLogManifestProto getDefaultInstance() { - return defaultInstance; - } - - public RemoteEditLogManifestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_RemoteEditLogManifestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_RemoteEditLogManifestProto_fieldAccessorTable; - } - - // repeated .RemoteEditLogProto logs = 1; - public static final int LOGS_FIELD_NUMBER = 1; - private java.util.List logs_; - public java.util.List getLogsList() { - return logs_; - } - public java.util.List - getLogsOrBuilderList() { - return logs_; - } - public int getLogsCount() { - return logs_.size(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto getLogs(int index) { - return logs_.get(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProtoOrBuilder getLogsOrBuilder( - int index) { - return logs_.get(index); - } - - private void initFields() { - logs_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - for (int i = 0; i < getLogsCount(); i++) { - if (!getLogs(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < logs_.size(); i++) { - output.writeMessage(1, logs_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - for (int i = 0; i < logs_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, logs_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto) obj; - - boolean result = true; - result = result && getLogsList() - .equals(other.getLogsList()); - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getLogsCount() > 0) { - hash = (37 * hash) + LOGS_FIELD_NUMBER; - hash = (53 * hash) + 
getLogsList().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - 
Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_RemoteEditLogManifestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_RemoteEditLogManifestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getLogsFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (logsBuilder_ == null) { - logs_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - } else { - logsBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto(this); - int from_bitField0_ = bitField0_; - if (logsBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - logs_ = java.util.Collections.unmodifiableList(logs_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.logs_ = logs_; - } else { - result.logs_ = logsBuilder_.build(); - } - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto)other); - } else { - super.mergeFrom(other); - return this; - } 
- } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance()) return this; - if (logsBuilder_ == null) { - if (!other.logs_.isEmpty()) { - if (logs_.isEmpty()) { - logs_ = other.logs_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureLogsIsMutable(); - logs_.addAll(other.logs_); - } - onChanged(); - } - } else { - if (!other.logs_.isEmpty()) { - if (logsBuilder_.isEmpty()) { - logsBuilder_.dispose(); - logsBuilder_ = null; - logs_ = other.logs_; - bitField0_ = (bitField0_ & ~0x00000001); - logsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getLogsFieldBuilder() : null; - } else { - logsBuilder_.addAllMessages(other.logs_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - for (int i = 0; i < getLogsCount(); i++) { - if (!getLogs(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addLogs(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // repeated .RemoteEditLogProto logs = 1; - private java.util.List logs_ = - java.util.Collections.emptyList(); - private void ensureLogsIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - logs_ = new java.util.ArrayList(logs_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProtoOrBuilder> logsBuilder_; - - public java.util.List getLogsList() { - if (logsBuilder_ == null) { - return java.util.Collections.unmodifiableList(logs_); - } else { - return logsBuilder_.getMessageList(); - } - } - public int getLogsCount() { - if (logsBuilder_ == null) { - return logs_.size(); - } else { - return logsBuilder_.getCount(); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto getLogs(int index) { - if (logsBuilder_ == null) { - return logs_.get(index); - } else { - return logsBuilder_.getMessage(index); - } - } - public Builder setLogs( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto value) { - if (logsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureLogsIsMutable(); - logs_.set(index, value); - onChanged(); - } else { - logsBuilder_.setMessage(index, value); - } - return this; - } - public 
Builder setLogs( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder builderForValue) { - if (logsBuilder_ == null) { - ensureLogsIsMutable(); - logs_.set(index, builderForValue.build()); - onChanged(); - } else { - logsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - public Builder addLogs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto value) { - if (logsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureLogsIsMutable(); - logs_.add(value); - onChanged(); - } else { - logsBuilder_.addMessage(value); - } - return this; - } - public Builder addLogs( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto value) { - if (logsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureLogsIsMutable(); - logs_.add(index, value); - onChanged(); - } else { - logsBuilder_.addMessage(index, value); - } - return this; - } - public Builder addLogs( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder builderForValue) { - if (logsBuilder_ == null) { - ensureLogsIsMutable(); - logs_.add(builderForValue.build()); - onChanged(); - } else { - logsBuilder_.addMessage(builderForValue.build()); - } - return this; - } - public Builder addLogs( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder builderForValue) { - if (logsBuilder_ == null) { - ensureLogsIsMutable(); - logs_.add(index, builderForValue.build()); - onChanged(); - } else { - logsBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - public Builder addAllLogs( - java.lang.Iterable values) { - if (logsBuilder_ == null) { - ensureLogsIsMutable(); - super.addAll(values, logs_); - onChanged(); - } else { - logsBuilder_.addAllMessages(values); - } - return this; - } - public Builder clearLogs() { - if (logsBuilder_ == null) { - logs_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - logsBuilder_.clear(); - } - return this; - } - public Builder removeLogs(int index) { - if (logsBuilder_ == null) { - ensureLogsIsMutable(); - logs_.remove(index); - onChanged(); - } else { - logsBuilder_.remove(index); - } - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder getLogsBuilder( - int index) { - return getLogsFieldBuilder().getBuilder(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProtoOrBuilder getLogsOrBuilder( - int index) { - if (logsBuilder_ == null) { - return logs_.get(index); } else { - return logsBuilder_.getMessageOrBuilder(index); - } - } - public java.util.List - getLogsOrBuilderList() { - if (logsBuilder_ != null) { - return logsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(logs_); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder addLogsBuilder() { - return getLogsFieldBuilder().addBuilder( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.getDefaultInstance()); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder addLogsBuilder( - int index) { - return getLogsFieldBuilder().addBuilder( - index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.getDefaultInstance()); - } - public java.util.List - getLogsBuilderList() { - return getLogsFieldBuilder().getBuilderList(); - } - 
private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProtoOrBuilder> - getLogsFieldBuilder() { - if (logsBuilder_ == null) { - logsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProtoOrBuilder>( - logs_, - ((bitField0_ & 0x00000001) == 0x00000001), - getParentForChildren(), - isClean()); - logs_ = null; - } - return logsBuilder_; - } - - // @@protoc_insertion_point(builder_scope:RemoteEditLogManifestProto) - } - - static { - defaultInstance = new RemoteEditLogManifestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:RemoteEditLogManifestProto) - } - - public interface NamespaceInfoProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string buildVersion = 1; - boolean hasBuildVersion(); - String getBuildVersion(); - - // required uint32 distUpgradeVersion = 2; - boolean hasDistUpgradeVersion(); - int getDistUpgradeVersion(); - - // required string blockPoolID = 3; - boolean hasBlockPoolID(); - String getBlockPoolID(); - - // required .StorageInfoProto storageInfo = 4; - boolean hasStorageInfo(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder(); - } - public static final class NamespaceInfoProto extends - com.google.protobuf.GeneratedMessage - implements NamespaceInfoProtoOrBuilder { - // Use NamespaceInfoProto.newBuilder() to construct. 
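[Editor's note, not part of the patch: RemoteEditLogManifestProto, deleted above, is just a container message, repeated .RemoteEditLogProto logs = 1, with the usual repeated-field accessors (addLogs, getLogsCount, getLogs). A hedged usage sketch, again assuming the pre-deletion generated classes; the addLogs(Builder) overload used here is visible in the hunk:

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto;

// Assemble a manifest of two consecutive edit-log segments.
RemoteEditLogManifestProto manifest = RemoteEditLogManifestProto.newBuilder()
    .addLogs(RemoteEditLogProto.newBuilder().setStartTxId(1L).setEndTxId(100L))
    .addLogs(RemoteEditLogProto.newBuilder().setStartTxId(101L).setEndTxId(200L))
    .build();   // isInitialized() recurses into each nested log's required fields

assert manifest.getLogsCount() == 2;
assert manifest.getLogs(0).getEndTxId() + 1 == manifest.getLogs(1).getStartTxId();
]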
- private NamespaceInfoProto(Builder builder) { - super(builder); - } - private NamespaceInfoProto(boolean noInit) {} - - private static final NamespaceInfoProto defaultInstance; - public static NamespaceInfoProto getDefaultInstance() { - return defaultInstance; - } - - public NamespaceInfoProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_NamespaceInfoProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_NamespaceInfoProto_fieldAccessorTable; - } - - private int bitField0_; - // required string buildVersion = 1; - public static final int BUILDVERSION_FIELD_NUMBER = 1; - private java.lang.Object buildVersion_; - public boolean hasBuildVersion() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getBuildVersion() { - java.lang.Object ref = buildVersion_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - buildVersion_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getBuildVersionBytes() { - java.lang.Object ref = buildVersion_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - buildVersion_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required uint32 distUpgradeVersion = 2; - public static final int DISTUPGRADEVERSION_FIELD_NUMBER = 2; - private int distUpgradeVersion_; - public boolean hasDistUpgradeVersion() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public int getDistUpgradeVersion() { - return distUpgradeVersion_; - } - - // required string blockPoolID = 3; - public static final int BLOCKPOOLID_FIELD_NUMBER = 3; - private java.lang.Object blockPoolID_; - public boolean hasBlockPoolID() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public String getBlockPoolID() { - java.lang.Object ref = blockPoolID_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - blockPoolID_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getBlockPoolIDBytes() { - java.lang.Object ref = blockPoolID_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - blockPoolID_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required .StorageInfoProto storageInfo = 4; - public static final int STORAGEINFO_FIELD_NUMBER = 4; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_; - public boolean hasStorageInfo() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() { - return storageInfo_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() { - return storageInfo_; - } - - private void initFields() { - buildVersion_ = ""; - 
distUpgradeVersion_ = 0; - blockPoolID_ = ""; - storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBuildVersion()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasDistUpgradeVersion()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasBlockPoolID()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasStorageInfo()) { - memoizedIsInitialized = 0; - return false; - } - if (!getStorageInfo().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getBuildVersionBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt32(2, distUpgradeVersion_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, getBlockPoolIDBytes()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeMessage(4, storageInfo_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getBuildVersionBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(2, distUpgradeVersion_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getBlockPoolIDBytes()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, storageInfo_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto) obj; - - boolean result = true; - result = result && (hasBuildVersion() == other.hasBuildVersion()); - if (hasBuildVersion()) { - result = result && getBuildVersion() - .equals(other.getBuildVersion()); - } - result = result && (hasDistUpgradeVersion() == other.hasDistUpgradeVersion()); - if (hasDistUpgradeVersion()) { - result = result && (getDistUpgradeVersion() - == other.getDistUpgradeVersion()); - } - result = result && (hasBlockPoolID() == other.hasBlockPoolID()); - if (hasBlockPoolID()) { - result = result && getBlockPoolID() - .equals(other.getBlockPoolID()); - } - result = result && (hasStorageInfo() == other.hasStorageInfo()); - if (hasStorageInfo()) { - result = result && getStorageInfo() - .equals(other.getStorageInfo()); - } - 
result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBuildVersion()) { - hash = (37 * hash) + BUILDVERSION_FIELD_NUMBER; - hash = (53 * hash) + getBuildVersion().hashCode(); - } - if (hasDistUpgradeVersion()) { - hash = (37 * hash) + DISTUPGRADEVERSION_FIELD_NUMBER; - hash = (53 * hash) + getDistUpgradeVersion(); - } - if (hasBlockPoolID()) { - hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER; - hash = (53 * hash) + getBlockPoolID().hashCode(); - } - if (hasStorageInfo()) { - hash = (37 * hash) + STORAGEINFO_FIELD_NUMBER; - hash = (53 * hash) + getStorageInfo().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseFrom( - com.google.protobuf.CodedInputStream input, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_NamespaceInfoProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_NamespaceInfoProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getStorageInfoFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - buildVersion_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - distUpgradeVersion_ = 0; - bitField0_ = (bitField0_ & ~0x00000002); - blockPoolID_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - if (storageInfoBuilder_ == null) { - storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); - } else { - storageInfoBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000008); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto buildPartial() { 
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.buildVersion_ = buildVersion_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.distUpgradeVersion_ = distUpgradeVersion_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.blockPoolID_ = blockPoolID_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - if (storageInfoBuilder_ == null) { - result.storageInfo_ = storageInfo_; - } else { - result.storageInfo_ = storageInfoBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance()) return this; - if (other.hasBuildVersion()) { - setBuildVersion(other.getBuildVersion()); - } - if (other.hasDistUpgradeVersion()) { - setDistUpgradeVersion(other.getDistUpgradeVersion()); - } - if (other.hasBlockPoolID()) { - setBlockPoolID(other.getBlockPoolID()); - } - if (other.hasStorageInfo()) { - mergeStorageInfo(other.getStorageInfo()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBuildVersion()) { - - return false; - } - if (!hasDistUpgradeVersion()) { - - return false; - } - if (!hasBlockPoolID()) { - - return false; - } - if (!hasStorageInfo()) { - - return false; - } - if (!getStorageInfo().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - buildVersion_ = input.readBytes(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - distUpgradeVersion_ = input.readUInt32(); - break; - } - case 26: { - bitField0_ |= 0x00000004; - blockPoolID_ = input.readBytes(); - break; - } - case 34: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder(); - if (hasStorageInfo()) { - subBuilder.mergeFrom(getStorageInfo()); - } - input.readMessage(subBuilder, extensionRegistry); - setStorageInfo(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required 
string buildVersion = 1; - private java.lang.Object buildVersion_ = ""; - public boolean hasBuildVersion() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getBuildVersion() { - java.lang.Object ref = buildVersion_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - buildVersion_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setBuildVersion(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - buildVersion_ = value; - onChanged(); - return this; - } - public Builder clearBuildVersion() { - bitField0_ = (bitField0_ & ~0x00000001); - buildVersion_ = getDefaultInstance().getBuildVersion(); - onChanged(); - return this; - } - void setBuildVersion(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - buildVersion_ = value; - onChanged(); - } - - // required uint32 distUpgradeVersion = 2; - private int distUpgradeVersion_ ; - public boolean hasDistUpgradeVersion() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public int getDistUpgradeVersion() { - return distUpgradeVersion_; - } - public Builder setDistUpgradeVersion(int value) { - bitField0_ |= 0x00000002; - distUpgradeVersion_ = value; - onChanged(); - return this; - } - public Builder clearDistUpgradeVersion() { - bitField0_ = (bitField0_ & ~0x00000002); - distUpgradeVersion_ = 0; - onChanged(); - return this; - } - - // required string blockPoolID = 3; - private java.lang.Object blockPoolID_ = ""; - public boolean hasBlockPoolID() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public String getBlockPoolID() { - java.lang.Object ref = blockPoolID_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - blockPoolID_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setBlockPoolID(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - blockPoolID_ = value; - onChanged(); - return this; - } - public Builder clearBlockPoolID() { - bitField0_ = (bitField0_ & ~0x00000004); - blockPoolID_ = getDefaultInstance().getBlockPoolID(); - onChanged(); - return this; - } - void setBlockPoolID(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000004; - blockPoolID_ = value; - onChanged(); - } - - // required .StorageInfoProto storageInfo = 4; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> storageInfoBuilder_; - public boolean hasStorageInfo() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() { - if (storageInfoBuilder_ == null) { - return storageInfo_; - } else { - return storageInfoBuilder_.getMessage(); - } - } - public Builder setStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) { - if (storageInfoBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - storageInfo_ = value; - onChanged(); - } else { - storageInfoBuilder_.setMessage(value); - } - bitField0_ |= 
0x00000008; - return this; - } - public Builder setStorageInfo( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder builderForValue) { - if (storageInfoBuilder_ == null) { - storageInfo_ = builderForValue.build(); - onChanged(); - } else { - storageInfoBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000008; - return this; - } - public Builder mergeStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) { - if (storageInfoBuilder_ == null) { - if (((bitField0_ & 0x00000008) == 0x00000008) && - storageInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance()) { - storageInfo_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder(storageInfo_).mergeFrom(value).buildPartial(); - } else { - storageInfo_ = value; - } - onChanged(); - } else { - storageInfoBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000008; - return this; - } - public Builder clearStorageInfo() { - if (storageInfoBuilder_ == null) { - storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); - onChanged(); - } else { - storageInfoBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000008); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder getStorageInfoBuilder() { - bitField0_ |= 0x00000008; - onChanged(); - return getStorageInfoFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() { - if (storageInfoBuilder_ != null) { - return storageInfoBuilder_.getMessageOrBuilder(); - } else { - return storageInfo_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> - getStorageInfoFieldBuilder() { - if (storageInfoBuilder_ == null) { - storageInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder>( - storageInfo_, - getParentForChildren(), - isClean()); - storageInfo_ = null; - } - return storageInfoBuilder_; - } - - // @@protoc_insertion_point(builder_scope:NamespaceInfoProto) - } - - static { - defaultInstance = new NamespaceInfoProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:NamespaceInfoProto) - } - - public interface BlockKeyProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required uint32 keyId = 1; - boolean hasKeyId(); - int getKeyId(); - - // required uint64 expiryDate = 2; - boolean hasExpiryDate(); - long getExpiryDate(); - - // required bytes keyBytes = 3; - boolean hasKeyBytes(); - com.google.protobuf.ByteString getKeyBytes(); - } - public static final class BlockKeyProto extends - com.google.protobuf.GeneratedMessage - implements BlockKeyProtoOrBuilder { - // Use BlockKeyProto.newBuilder() to construct. 
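[Editor's note, not part of the patch: NamespaceInfoProto, deleted above, declares four required fields (buildVersion, distUpgradeVersion, blockPoolID, storageInfo), and its isInitialized() also validates the nested StorageInfoProto. A short sketch of how that required-field check behaves, assuming the pre-deletion classes; StorageInfoProto's own fields are not shown in this hunk, so the sketch deliberately stops at buildPartial(), and the version and block-pool strings are hypothetical placeholders:

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto;

// Leave the required storageInfo unset: buildPartial() tolerates this,
// whereas build() would throw UninitializedMessageException.
NamespaceInfoProto.Builder b = NamespaceInfoProto.newBuilder()
    .setBuildVersion("0.24.0-SNAPSHOT")      // hypothetical build version string
    .setDistUpgradeVersion(0)
    .setBlockPoolID("BP-1234-127.0.0.1-0");  // hypothetical block pool id
assert !b.isInitialized();                   // storageInfo is still missing
NamespaceInfoProto partial = b.buildPartial();
]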
- private BlockKeyProto(Builder builder) { - super(builder); - } - private BlockKeyProto(boolean noInit) {} - - private static final BlockKeyProto defaultInstance; - public static BlockKeyProto getDefaultInstance() { - return defaultInstance; - } - - public BlockKeyProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockKeyProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockKeyProto_fieldAccessorTable; - } - - private int bitField0_; - // required uint32 keyId = 1; - public static final int KEYID_FIELD_NUMBER = 1; - private int keyId_; - public boolean hasKeyId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public int getKeyId() { - return keyId_; - } - - // required uint64 expiryDate = 2; - public static final int EXPIRYDATE_FIELD_NUMBER = 2; - private long expiryDate_; - public boolean hasExpiryDate() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getExpiryDate() { - return expiryDate_; - } - - // required bytes keyBytes = 3; - public static final int KEYBYTES_FIELD_NUMBER = 3; - private com.google.protobuf.ByteString keyBytes_; - public boolean hasKeyBytes() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public com.google.protobuf.ByteString getKeyBytes() { - return keyBytes_; - } - - private void initFields() { - keyId_ = 0; - expiryDate_ = 0L; - keyBytes_ = com.google.protobuf.ByteString.EMPTY; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasKeyId()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasExpiryDate()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasKeyBytes()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt32(1, keyId_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, expiryDate_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, keyBytes_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(1, keyId_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, expiryDate_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, keyBytes_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if 
(obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto) obj; - - boolean result = true; - result = result && (hasKeyId() == other.hasKeyId()); - if (hasKeyId()) { - result = result && (getKeyId() - == other.getKeyId()); - } - result = result && (hasExpiryDate() == other.hasExpiryDate()); - if (hasExpiryDate()) { - result = result && (getExpiryDate() - == other.getExpiryDate()); - } - result = result && (hasKeyBytes() == other.hasKeyBytes()); - if (hasKeyBytes()) { - result = result && getKeyBytes() - .equals(other.getKeyBytes()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasKeyId()) { - hash = (37 * hash) + KEYID_FIELD_NUMBER; - hash = (53 * hash) + getKeyId(); - } - if (hasExpiryDate()) { - hash = (37 * hash) + EXPIRYDATE_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getExpiryDate()); - } - if (hasKeyBytes()) { - hash = (37 * hash) + KEYBYTES_FIELD_NUMBER; - hash = (53 * hash) + getKeyBytes().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException 
{ - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockKeyProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockKeyProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - keyId_ = 0; - bitField0_ = (bitField0_ & ~0x00000001); - expiryDate_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - keyBytes_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto result = buildPartial(); - if
(!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.keyId_ = keyId_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.expiryDate_ = expiryDate_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.keyBytes_ = keyBytes_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance()) return this; - if (other.hasKeyId()) { - setKeyId(other.getKeyId()); - } - if (other.hasExpiryDate()) { - setExpiryDate(other.getExpiryDate()); - } - if (other.hasKeyBytes()) { - setKeyBytes(other.getKeyBytes()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasKeyId()) { - - return false; - } - if (!hasExpiryDate()) { - - return false; - } - if (!hasKeyBytes()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - keyId_ = input.readUInt32(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - expiryDate_ = input.readUInt64(); - break; - } - case 26: { - bitField0_ |= 0x00000004; - keyBytes_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required uint32 keyId = 1; - private int keyId_ ; - public boolean hasKeyId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public int getKeyId() { - return keyId_; - } - public Builder setKeyId(int value) { - bitField0_ |= 0x00000001; - keyId_ = value; - onChanged(); - return this; - } - public Builder clearKeyId() { - bitField0_ = (bitField0_ & ~0x00000001); - keyId_ = 0; - onChanged(); - return this; - } - - // required uint64 expiryDate = 2; - private long expiryDate_ ; - public boolean hasExpiryDate() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getExpiryDate() { - return expiryDate_; - } - public Builder setExpiryDate(long value) { - bitField0_ |= 0x00000002; - 
expiryDate_ = value; - onChanged(); - return this; - } - public Builder clearExpiryDate() { - bitField0_ = (bitField0_ & ~0x00000002); - expiryDate_ = 0L; - onChanged(); - return this; - } - - // required bytes keyBytes = 3; - private com.google.protobuf.ByteString keyBytes_ = com.google.protobuf.ByteString.EMPTY; - public boolean hasKeyBytes() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public com.google.protobuf.ByteString getKeyBytes() { - return keyBytes_; - } - public Builder setKeyBytes(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - keyBytes_ = value; - onChanged(); - return this; - } - public Builder clearKeyBytes() { - bitField0_ = (bitField0_ & ~0x00000004); - keyBytes_ = getDefaultInstance().getKeyBytes(); - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:BlockKeyProto) - } - - static { - defaultInstance = new BlockKeyProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:BlockKeyProto) - } - - public interface ExportedBlockKeysProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required bool isBlockTokenEnabled = 1; - boolean hasIsBlockTokenEnabled(); - boolean getIsBlockTokenEnabled(); - - // required uint64 keyUpdateInterval = 2; - boolean hasKeyUpdateInterval(); - long getKeyUpdateInterval(); - - // required uint64 tokenLifeTime = 3; - boolean hasTokenLifeTime(); - long getTokenLifeTime(); - - // required .BlockKeyProto currentKey = 4; - boolean hasCurrentKey(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto getCurrentKey(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder getCurrentKeyOrBuilder(); - - // repeated .BlockKeyProto allKeys = 5; - java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto> - getAllKeysList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto getAllKeys(int index); - int getAllKeysCount(); - java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder> - getAllKeysOrBuilderList(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder getAllKeysOrBuilder( - int index); - } - public static final class ExportedBlockKeysProto extends - com.google.protobuf.GeneratedMessage - implements ExportedBlockKeysProtoOrBuilder { - // Use ExportedBlockKeysProto.newBuilder() to construct.
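// A minimal usage sketch of the BlockKeyProto API deleted above (an illustration,
// not part of the protoc output): the generated class follows the stock protobuf
// 2.x builder pattern, so a caller would construct and round-trip a key roughly as
// below. Every member used here is visible in this file except
// MessageLite#toByteString, which is assumed from the protobuf-java runtime.
//
//   BlockKeyProto key = BlockKeyProto.newBuilder()
//       .setKeyId(7)                                 // required uint32 keyId = 1
//       .setExpiryDate(System.currentTimeMillis())   // required uint64 expiryDate = 2
//       .setKeyBytes(com.google.protobuf.ByteString.copyFromUtf8("secret"))
//       .build();                                    // throws if a required field is unset
//   BlockKeyProto parsed = BlockKeyProto.parseFrom(key.toByteString());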
- private ExportedBlockKeysProto(Builder builder) { - super(builder); - } - private ExportedBlockKeysProto(boolean noInit) {} - - private static final ExportedBlockKeysProto defaultInstance; - public static ExportedBlockKeysProto getDefaultInstance() { - return defaultInstance; - } - - public ExportedBlockKeysProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_ExportedBlockKeysProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_ExportedBlockKeysProto_fieldAccessorTable; - } - - private int bitField0_; - // required bool isBlockTokenEnabled = 1; - public static final int ISBLOCKTOKENENABLED_FIELD_NUMBER = 1; - private boolean isBlockTokenEnabled_; - public boolean hasIsBlockTokenEnabled() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public boolean getIsBlockTokenEnabled() { - return isBlockTokenEnabled_; - } - - // required uint64 keyUpdateInterval = 2; - public static final int KEYUPDATEINTERVAL_FIELD_NUMBER = 2; - private long keyUpdateInterval_; - public boolean hasKeyUpdateInterval() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getKeyUpdateInterval() { - return keyUpdateInterval_; - } - - // required uint64 tokenLifeTime = 3; - public static final int TOKENLIFETIME_FIELD_NUMBER = 3; - private long tokenLifeTime_; - public boolean hasTokenLifeTime() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getTokenLifeTime() { - return tokenLifeTime_; - } - - // required .BlockKeyProto currentKey = 4; - public static final int CURRENTKEY_FIELD_NUMBER = 4; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto currentKey_; - public boolean hasCurrentKey() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto getCurrentKey() { - return currentKey_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder getCurrentKeyOrBuilder() { - return currentKey_; - } - - // repeated .BlockKeyProto allKeys = 5; - public static final int ALLKEYS_FIELD_NUMBER = 5; - private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto> allKeys_; - public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto> getAllKeysList() { - return allKeys_; - } - public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder> - getAllKeysOrBuilderList() { - return allKeys_; - } - public int getAllKeysCount() { - return allKeys_.size(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto getAllKeys(int index) { - return allKeys_.get(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder getAllKeysOrBuilder( - int index) { - return allKeys_.get(index); - } - - private void initFields() { - isBlockTokenEnabled_ = false; - keyUpdateInterval_ = 0L; - tokenLifeTime_ = 0L; - currentKey_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance(); - allKeys_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasIsBlockTokenEnabled()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasKeyUpdateInterval()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasTokenLifeTime())
{ - memoizedIsInitialized = 0; - return false; - } - if (!hasCurrentKey()) { - memoizedIsInitialized = 0; - return false; - } - if (!getCurrentKey().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getAllKeysCount(); i++) { - if (!getAllKeys(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(1, isBlockTokenEnabled_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, keyUpdateInterval_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt64(3, tokenLifeTime_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeMessage(4, currentKey_); - } - for (int i = 0; i < allKeys_.size(); i++) { - output.writeMessage(5, allKeys_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(1, isBlockTokenEnabled_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, keyUpdateInterval_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(3, tokenLifeTime_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, currentKey_); - } - for (int i = 0; i < allKeys_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(5, allKeys_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto) obj; - - boolean result = true; - result = result && (hasIsBlockTokenEnabled() == other.hasIsBlockTokenEnabled()); - if (hasIsBlockTokenEnabled()) { - result = result && (getIsBlockTokenEnabled() - == other.getIsBlockTokenEnabled()); - } - result = result && (hasKeyUpdateInterval() == other.hasKeyUpdateInterval()); - if (hasKeyUpdateInterval()) { - result = result && (getKeyUpdateInterval() - == other.getKeyUpdateInterval()); - } - result = result && (hasTokenLifeTime() == other.hasTokenLifeTime()); - if (hasTokenLifeTime()) { - result = result && (getTokenLifeTime() - == other.getTokenLifeTime()); - } - result = result && (hasCurrentKey() == other.hasCurrentKey()); - if (hasCurrentKey()) { - result = result && getCurrentKey() - .equals(other.getCurrentKey()); - } - result = result && getAllKeysList() - .equals(other.getAllKeysList()); - result = result && - 
getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasIsBlockTokenEnabled()) { - hash = (37 * hash) + ISBLOCKTOKENENABLED_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getIsBlockTokenEnabled()); - } - if (hasKeyUpdateInterval()) { - hash = (37 * hash) + KEYUPDATEINTERVAL_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getKeyUpdateInterval()); - } - if (hasTokenLifeTime()) { - hash = (37 * hash) + TOKENLIFETIME_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getTokenLifeTime()); - } - if (hasCurrentKey()) { - hash = (37 * hash) + CURRENTKEY_FIELD_NUMBER; - hash = (53 * hash) + getCurrentKey().hashCode(); - } - if (getAllKeysCount() > 0) { - hash = (37 * hash) + ALLKEYS_FIELD_NUMBER; - hash = (53 * hash) + getAllKeysList().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return 
newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_ExportedBlockKeysProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_ExportedBlockKeysProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getCurrentKeyFieldBuilder(); - getAllKeysFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - isBlockTokenEnabled_ = false; - bitField0_ = (bitField0_ & ~0x00000001); - keyUpdateInterval_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - tokenLifeTime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - if (currentKeyBuilder_ == null) { - currentKey_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance(); - } else { - currentKeyBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000008); - if (allKeysBuilder_ == null) { - allKeys_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000010); - } else { - allKeysBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.isBlockTokenEnabled_ = isBlockTokenEnabled_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.keyUpdateInterval_ = keyUpdateInterval_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.tokenLifeTime_ = tokenLifeTime_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - if (currentKeyBuilder_ == null) { - result.currentKey_ = currentKey_; - } else { - result.currentKey_ = currentKeyBuilder_.build(); - } - if (allKeysBuilder_ == null) { - if (((bitField0_ & 0x00000010) == 0x00000010)) { - allKeys_ = java.util.Collections.unmodifiableList(allKeys_); - bitField0_ = (bitField0_ & ~0x00000010); - } - result.allKeys_ = allKeys_; - } else { - result.allKeys_ = allKeysBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance()) return this; - if (other.hasIsBlockTokenEnabled()) { - setIsBlockTokenEnabled(other.getIsBlockTokenEnabled()); - } - if (other.hasKeyUpdateInterval()) { - setKeyUpdateInterval(other.getKeyUpdateInterval()); - } - if (other.hasTokenLifeTime()) { - setTokenLifeTime(other.getTokenLifeTime()); - } - if (other.hasCurrentKey()) { - mergeCurrentKey(other.getCurrentKey()); - } - if (allKeysBuilder_ == null) { - if (!other.allKeys_.isEmpty()) { - if (allKeys_.isEmpty()) { - allKeys_ = other.allKeys_; - bitField0_ = (bitField0_ & ~0x00000010); - } else { - ensureAllKeysIsMutable(); - allKeys_.addAll(other.allKeys_); - } - onChanged(); - } - } else { - if (!other.allKeys_.isEmpty()) { - if (allKeysBuilder_.isEmpty()) { - allKeysBuilder_.dispose(); - allKeysBuilder_ = null; - allKeys_ = other.allKeys_; - bitField0_ = (bitField0_ & ~0x00000010); - allKeysBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getAllKeysFieldBuilder() : null; - } else { - allKeysBuilder_.addAllMessages(other.allKeys_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasIsBlockTokenEnabled()) { - - return false; - } - if (!hasKeyUpdateInterval()) { - - return false; - } - if (!hasTokenLifeTime()) { - - return false; - } - if (!hasCurrentKey()) { - - return false; - } - if (!getCurrentKey().isInitialized()) { - - return false; - } - for (int i = 0; i < getAllKeysCount(); i++) { - if (!getAllKeys(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - isBlockTokenEnabled_ = input.readBool(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - keyUpdateInterval_ = input.readUInt64(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - tokenLifeTime_ = input.readUInt64(); - break; - } - case 34: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.newBuilder(); - if (hasCurrentKey()) { - subBuilder.mergeFrom(getCurrentKey()); - } - input.readMessage(subBuilder, extensionRegistry); - setCurrentKey(subBuilder.buildPartial()); - break; - } - case 42: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addAllKeys(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required bool isBlockTokenEnabled = 1; - private boolean isBlockTokenEnabled_ ; - public boolean hasIsBlockTokenEnabled() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public boolean getIsBlockTokenEnabled() { - return isBlockTokenEnabled_; - } - public Builder setIsBlockTokenEnabled(boolean value) { - bitField0_ |= 0x00000001; - isBlockTokenEnabled_ = value; - onChanged(); - return this; - } - public Builder clearIsBlockTokenEnabled() { - bitField0_ = (bitField0_ & ~0x00000001); - isBlockTokenEnabled_ = false; - onChanged(); - return this; - } - - // required uint64 keyUpdateInterval = 2; - private long keyUpdateInterval_ ; - public boolean hasKeyUpdateInterval() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getKeyUpdateInterval() { - return keyUpdateInterval_; - } - public Builder setKeyUpdateInterval(long value) { - bitField0_ |= 0x00000002; - keyUpdateInterval_ = value; - onChanged(); - return this; - } - public Builder clearKeyUpdateInterval() { - bitField0_ = (bitField0_ & ~0x00000002); - keyUpdateInterval_ = 0L; - onChanged(); - return this; - } - - // required uint64 tokenLifeTime = 3; - private long tokenLifeTime_ ; - public boolean hasTokenLifeTime() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long 
getTokenLifeTime() { - return tokenLifeTime_; - } - public Builder setTokenLifeTime(long value) { - bitField0_ |= 0x00000004; - tokenLifeTime_ = value; - onChanged(); - return this; - } - public Builder clearTokenLifeTime() { - bitField0_ = (bitField0_ & ~0x00000004); - tokenLifeTime_ = 0L; - onChanged(); - return this; - } - - // required .BlockKeyProto currentKey = 4; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto currentKey_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder> currentKeyBuilder_; - public boolean hasCurrentKey() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto getCurrentKey() { - if (currentKeyBuilder_ == null) { - return currentKey_; - } else { - return currentKeyBuilder_.getMessage(); - } - } - public Builder setCurrentKey(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto value) { - if (currentKeyBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - currentKey_ = value; - onChanged(); - } else { - currentKeyBuilder_.setMessage(value); - } - bitField0_ |= 0x00000008; - return this; - } - public Builder setCurrentKey( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder builderForValue) { - if (currentKeyBuilder_ == null) { - currentKey_ = builderForValue.build(); - onChanged(); - } else { - currentKeyBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000008; - return this; - } - public Builder mergeCurrentKey(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto value) { - if (currentKeyBuilder_ == null) { - if (((bitField0_ & 0x00000008) == 0x00000008) && - currentKey_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance()) { - currentKey_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.newBuilder(currentKey_).mergeFrom(value).buildPartial(); - } else { - currentKey_ = value; - } - onChanged(); - } else { - currentKeyBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000008; - return this; - } - public Builder clearCurrentKey() { - if (currentKeyBuilder_ == null) { - currentKey_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance(); - onChanged(); - } else { - currentKeyBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000008); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder getCurrentKeyBuilder() { - bitField0_ |= 0x00000008; - onChanged(); - return getCurrentKeyFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder getCurrentKeyOrBuilder() { - if (currentKeyBuilder_ != null) { - return currentKeyBuilder_.getMessageOrBuilder(); - } else { - return currentKey_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder> - getCurrentKeyFieldBuilder() { - if (currentKeyBuilder_ == null) { - currentKeyBuilder_ = new com.google.protobuf.SingleFieldBuilder< - 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder>( - currentKey_, - getParentForChildren(), - isClean()); - currentKey_ = null; - } - return currentKeyBuilder_; - } - - // repeated .BlockKeyProto allKeys = 5; - private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto> allKeys_ = - java.util.Collections.emptyList(); - private void ensureAllKeysIsMutable() { - if (!((bitField0_ & 0x00000010) == 0x00000010)) { - allKeys_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto>(allKeys_); - bitField0_ |= 0x00000010; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder> allKeysBuilder_; - - public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto> getAllKeysList() { - if (allKeysBuilder_ == null) { - return java.util.Collections.unmodifiableList(allKeys_); - } else { - return allKeysBuilder_.getMessageList(); - } - } - public int getAllKeysCount() { - if (allKeysBuilder_ == null) { - return allKeys_.size(); - } else { - return allKeysBuilder_.getCount(); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto getAllKeys(int index) { - if (allKeysBuilder_ == null) { - return allKeys_.get(index); - } else { - return allKeysBuilder_.getMessage(index); - } - } - public Builder setAllKeys( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto value) { - if (allKeysBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureAllKeysIsMutable(); - allKeys_.set(index, value); - onChanged(); - } else { - allKeysBuilder_.setMessage(index, value); - } - return this; - } - public Builder setAllKeys( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder builderForValue) { - if (allKeysBuilder_ == null) { - ensureAllKeysIsMutable(); - allKeys_.set(index, builderForValue.build()); - onChanged(); - } else { - allKeysBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - public Builder addAllKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto value) { - if (allKeysBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureAllKeysIsMutable(); - allKeys_.add(value); - onChanged(); - } else { - allKeysBuilder_.addMessage(value); - } - return this; - } - public Builder addAllKeys( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto value) { - if (allKeysBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureAllKeysIsMutable(); - allKeys_.add(index, value); - onChanged(); - } else { - allKeysBuilder_.addMessage(index, value); - } - return this; - } - public Builder addAllKeys( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder builderForValue) { - if (allKeysBuilder_ == null) { - ensureAllKeysIsMutable(); - allKeys_.add(builderForValue.build()); - onChanged(); - } else { - allKeysBuilder_.addMessage(builderForValue.build()); - } - return this; - } - public Builder addAllKeys( - int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder builderForValue) { - if (allKeysBuilder_ == null) { - ensureAllKeysIsMutable(); - allKeys_.add(index, builderForValue.build()); - onChanged(); - } else { - allKeysBuilder_.addMessage(index, builderForValue.build()); - } - return this; -
} - public Builder addAllAllKeys( - java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto> values) { - if (allKeysBuilder_ == null) { - ensureAllKeysIsMutable(); - super.addAll(values, allKeys_); - onChanged(); - } else { - allKeysBuilder_.addAllMessages(values); - } - return this; - } - public Builder clearAllKeys() { - if (allKeysBuilder_ == null) { - allKeys_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000010); - onChanged(); - } else { - allKeysBuilder_.clear(); - } - return this; - } - public Builder removeAllKeys(int index) { - if (allKeysBuilder_ == null) { - ensureAllKeysIsMutable(); - allKeys_.remove(index); - onChanged(); - } else { - allKeysBuilder_.remove(index); - } - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder getAllKeysBuilder( - int index) { - return getAllKeysFieldBuilder().getBuilder(index); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder getAllKeysOrBuilder( - int index) { - if (allKeysBuilder_ == null) { - return allKeys_.get(index); } else { - return allKeysBuilder_.getMessageOrBuilder(index); - } - } - public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder> - getAllKeysOrBuilderList() { - if (allKeysBuilder_ != null) { - return allKeysBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(allKeys_); - } - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder addAllKeysBuilder() { - return getAllKeysFieldBuilder().addBuilder( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance()); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder addAllKeysBuilder( - int index) { - return getAllKeysFieldBuilder().addBuilder( - index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance()); - } - public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder> - getAllKeysBuilderList() { - return getAllKeysFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder> - getAllKeysFieldBuilder() { - if (allKeysBuilder_ == null) { - allKeysBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder>( - allKeys_, - ((bitField0_ & 0x00000010) == 0x00000010), - getParentForChildren(), - isClean()); - allKeys_ = null; - } - return allKeysBuilder_; - } - - // @@protoc_insertion_point(builder_scope:ExportedBlockKeysProto) - } - - static { - defaultInstance = new ExportedBlockKeysProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ExportedBlockKeysProto) - } - - public interface RecoveringBlockProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required uint64 newGenStamp = 1; - boolean hasNewGenStamp(); - long getNewGenStamp(); - - // required .LocatedBlockProto block = 2; - boolean hasBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder(); - } - public static final class RecoveringBlockProto extends - com.google.protobuf.GeneratedMessage - implements RecoveringBlockProtoOrBuilder { - // Use
RecoveringBlockProto.newBuilder() to construct. - private RecoveringBlockProto(Builder builder) { - super(builder); - } - private RecoveringBlockProto(boolean noInit) {} - - private static final RecoveringBlockProto defaultInstance; - public static RecoveringBlockProto getDefaultInstance() { - return defaultInstance; - } - - public RecoveringBlockProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_RecoveringBlockProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_RecoveringBlockProto_fieldAccessorTable; - } - - private int bitField0_; - // required uint64 newGenStamp = 1; - public static final int NEWGENSTAMP_FIELD_NUMBER = 1; - private long newGenStamp_; - public boolean hasNewGenStamp() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getNewGenStamp() { - return newGenStamp_; - } - - // required .LocatedBlockProto block = 2; - public static final int BLOCK_FIELD_NUMBER = 2; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto block_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock() { - return block_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder() { - return block_; - } - - private void initFields() { - newGenStamp_ = 0L; - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasNewGenStamp()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasBlock()) { - memoizedIsInitialized = 0; - return false; - } - if (!getBlock().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt64(1, newGenStamp_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, block_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(1, newGenStamp_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, block_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto) obj; - - boolean result = true; - result = result && (hasNewGenStamp() == other.hasNewGenStamp()); - if (hasNewGenStamp()) { - result = result && (getNewGenStamp() - == other.getNewGenStamp()); - } - result = result && (hasBlock() == other.hasBlock()); - if (hasBlock()) { - result = result && getBlock() - .equals(other.getBlock()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasNewGenStamp()) { - hash = (37 * hash) + NEWGENSTAMP_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getNewGenStamp()); - } - if (hasBlock()) { - hash = (37 * hash) + BLOCK_FIELD_NUMBER; - hash = (53 * hash) + getBlock().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_RecoveringBlockProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_RecoveringBlockProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBlockFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - newGenStamp_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto result = buildPartial(); - if
(!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.newGenStamp_ = newGenStamp_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - if (blockBuilder_ == null) { - result.block_ = block_; - } else { - result.block_ = blockBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance()) return this; - if (other.hasNewGenStamp()) { - setNewGenStamp(other.getNewGenStamp()); - } - if (other.hasBlock()) { - mergeBlock(other.getBlock()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasNewGenStamp()) { - - return false; - } - if (!hasBlock()) { - - return false; - } - if (!getBlock().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - newGenStamp_ = input.readUInt64(); - break; - } - case 18: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder(); - if (hasBlock()) { - subBuilder.mergeFrom(getBlock()); - } - input.readMessage(subBuilder, extensionRegistry); - setBlock(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required uint64 newGenStamp = 1; - private long newGenStamp_ ; - public boolean hasNewGenStamp() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getNewGenStamp() { - return newGenStamp_; - } - public Builder setNewGenStamp(long value) { - bitField0_ |= 0x00000001; - newGenStamp_ = value; - onChanged(); - return this; - } - public Builder clearNewGenStamp() { - bitField0_ = (bitField0_ & ~0x00000001); - newGenStamp_ = 0L; - onChanged(); - return this; - } - - // required .LocatedBlockProto block = 2; - private 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> blockBuilder_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock() { - if (blockBuilder_ == null) { - return block_; - } else { - return blockBuilder_.getMessage(); - } - } - public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { - if (blockBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - block_ = value; - onChanged(); - } else { - blockBuilder_.setMessage(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder setBlock( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { - if (blockBuilder_ == null) { - block_ = builderForValue.build(); - onChanged(); - } else { - blockBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { - if (blockBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && - block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) { - block_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); - } else { - block_ = value; - } - onChanged(); - } else { - blockBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder clearBlock() { - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); - onChanged(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getBlockBuilder() { - bitField0_ |= 0x00000002; - onChanged(); - return getBlockFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder() { - if (blockBuilder_ != null) { - return blockBuilder_.getMessageOrBuilder(); - } else { - return block_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> - getBlockFieldBuilder() { - if (blockBuilder_ == null) { - blockBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>( - block_, - getParentForChildren(), - isClean()); - block_ = null; - } - return blockBuilder_; - } - - // @@protoc_insertion_point(builder_scope:RecoveringBlockProto) - } - - static { - defaultInstance = new RecoveringBlockProto(true); - defaultInstance.initFields(); - } - - // 
@@protoc_insertion_point(class_scope:RecoveringBlockProto) - } - - public interface VersionRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class VersionRequestProto extends - com.google.protobuf.GeneratedMessage - implements VersionRequestProtoOrBuilder { - // Use VersionRequestProto.newBuilder() to construct. - private VersionRequestProto(Builder builder) { - super(builder); - } - private VersionRequestProto(boolean noInit) {} - - private static final VersionRequestProto defaultInstance; - public static VersionRequestProto getDefaultInstance() { - return defaultInstance; - } - - public VersionRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_VersionRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_VersionRequestProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return 
newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_VersionRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_VersionRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void 
maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:VersionRequestProto) - } - - static { - defaultInstance = new VersionRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:VersionRequestProto) - } - - public interface VersionResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .NamespaceInfoProto info = 1; - boolean hasInfo(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getInfo(); - 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getInfoOrBuilder(); - } - public static final class VersionResponseProto extends - com.google.protobuf.GeneratedMessage - implements VersionResponseProtoOrBuilder { - // Use VersionResponseProto.newBuilder() to construct. - private VersionResponseProto(Builder builder) { - super(builder); - } - private VersionResponseProto(boolean noInit) {} - - private static final VersionResponseProto defaultInstance; - public static VersionResponseProto getDefaultInstance() { - return defaultInstance; - } - - public VersionResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_VersionResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_VersionResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required .NamespaceInfoProto info = 1; - public static final int INFO_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto info_; - public boolean hasInfo() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getInfo() { - return info_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getInfoOrBuilder() { - return info_; - } - - private void initFields() { - info_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasInfo()) { - memoizedIsInitialized = 0; - return false; - } - if (!getInfo().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, info_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, info_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto) obj; - - boolean result = true; - result = result && (hasInfo() == other.hasInfo()); - if (hasInfo()) { - result = result && getInfo() - .equals(other.getInfo()); - } 
- result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasInfo()) { - hash = (37 * hash) + INFO_FIELD_NUMBER; - hash = (53 * hash) + getInfo().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto prototype) { - 
return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_VersionResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_VersionResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getInfoFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (infoBuilder_ == null) { - info_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance(); - } else { - infoBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (infoBuilder_ == null) { - result.info_ = info_; - } else { - result.info_ = infoBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDefaultInstance()) return this; - if (other.hasInfo()) { - mergeInfo(other.getInfo()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasInfo()) { - - return false; - } - if (!getInfo().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder(); - if (hasInfo()) { - subBuilder.mergeFrom(getInfo()); - } - input.readMessage(subBuilder, extensionRegistry); - setInfo(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .NamespaceInfoProto info = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto info_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> infoBuilder_; - public boolean hasInfo() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getInfo() { - if (infoBuilder_ == null) { - return info_; - } else { - return infoBuilder_.getMessage(); - } - } - public Builder setInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) { - if (infoBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - info_ = value; - onChanged(); - } else { - infoBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setInfo( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder builderForValue) { - if (infoBuilder_ == null) { - info_ = builderForValue.build(); - onChanged(); - } else { - infoBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) { - if (infoBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - info_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance()) { - info_ = - 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder(info_).mergeFrom(value).buildPartial(); - } else { - info_ = value; - } - onChanged(); - } else { - infoBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearInfo() { - if (infoBuilder_ == null) { - info_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance(); - onChanged(); - } else { - infoBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder getInfoBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getInfoFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getInfoOrBuilder() { - if (infoBuilder_ != null) { - return infoBuilder_.getMessageOrBuilder(); - } else { - return info_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> - getInfoFieldBuilder() { - if (infoBuilder_ == null) { - infoBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder>( - info_, - getParentForChildren(), - isClean()); - info_ = null; - } - return infoBuilder_; - } - - // @@protoc_insertion_point(builder_scope:VersionResponseProto) - } - - static { - defaultInstance = new VersionResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:VersionResponseProto) - } - - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ExtendedBlockProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ExtendedBlockProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_BlockTokenIdentifierProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_BlockTokenIdentifierProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_DatanodeIDProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_DatanodeIDProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_DatanodeInfosProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_DatanodeInfosProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_DatanodeInfoProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_DatanodeInfoProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ContentSummaryProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ContentSummaryProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_CorruptFileBlocksProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable 
- internal_static_CorruptFileBlocksProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_FsPermissionProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_FsPermissionProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_LocatedBlockProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_LocatedBlockProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_LocatedBlocksProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_LocatedBlocksProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_HdfsFileStatusProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_HdfsFileStatusProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_FsServerDefaultsProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_FsServerDefaultsProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_DirectoryListingProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_DirectoryListingProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_UpgradeStatusReportProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_UpgradeStatusReportProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_StorageInfoProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_StorageInfoProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_NamenodeRegistrationProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_NamenodeRegistrationProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_CheckpointSignatureProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_CheckpointSignatureProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_NamenodeCommandProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_NamenodeCommandProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_CheckpointCommandProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_CheckpointCommandProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_BlockProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_BlockProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_BlockWithLocationsProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_BlockWithLocationsProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - 
internal_static_BlocksWithLocationsProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_BlocksWithLocationsProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RemoteEditLogProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RemoteEditLogProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RemoteEditLogManifestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RemoteEditLogManifestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_NamespaceInfoProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_NamespaceInfoProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_BlockKeyProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_BlockKeyProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ExportedBlockKeysProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ExportedBlockKeysProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RecoveringBlockProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RecoveringBlockProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_VersionRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_VersionRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_VersionResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_VersionResponseProto_fieldAccessorTable; - - public static com.google.protobuf.Descriptors.FileDescriptor - getDescriptor() { - return descriptor; - } - private static com.google.protobuf.Descriptors.FileDescriptor - descriptor; - static { - java.lang.String[] descriptorData = { - "\n\nhdfs.proto\"`\n\022ExtendedBlockProto\022\016\n\006po" + - "olId\030\001 \002(\t\022\017\n\007blockId\030\002 \002(\004\022\027\n\017generatio" + - "nStamp\030\003 \002(\004\022\020\n\010numBytes\030\004 \001(\004\"`\n\031BlockT" + - "okenIdentifierProto\022\022\n\nidentifier\030\001 \002(\014\022" + - "\020\n\010password\030\002 \002(\014\022\014\n\004kind\030\003 \002(\t\022\017\n\007servi" + - "ce\030\004 \002(\t\"U\n\017DatanodeIDProto\022\014\n\004name\030\001 \002(" + - "\t\022\021\n\tstorageID\030\002 \002(\t\022\020\n\010infoPort\030\003 \002(\r\022\017" + - "\n\007ipcPort\030\004 \002(\r\";\n\022DatanodeInfosProto\022%\n" + - "\tdatanodes\030\001 \003(\0132\022.DatanodeInfoProto\"\312\002\n" + - "\021DatanodeInfoProto\022\034\n\002id\030\001 \002(\0132\020.Datanod", - "eIDProto\022\020\n\010capacity\030\002 \001(\004\022\017\n\007dfsUsed\030\003 " + - "\001(\004\022\021\n\tremaining\030\004 \001(\004\022\025\n\rblockPoolUsed\030" + - "\005 \001(\004\022\022\n\nlastUpdate\030\006 \001(\004\022\024\n\014xceiverCoun" + - "t\030\007 \001(\r\022\020\n\010location\030\010 \001(\t\022\020\n\010hostName\030\t " + - 
"\001(\t\0221\n\nadminState\030\n \001(\0162\035.DatanodeInfoPr" + - "oto.AdminState\"I\n\nAdminState\022\n\n\006NORMAL\020\000" + - "\022\033\n\027DECOMMISSION_INPROGRESS\020\001\022\022\n\016DECOMMI" + - "SSIONED\020\002\"\212\001\n\023ContentSummaryProto\022\016\n\006len" + - "gth\030\001 \002(\004\022\021\n\tfileCount\030\002 \002(\004\022\026\n\016director" + - "yCount\030\003 \002(\004\022\r\n\005quota\030\004 \002(\004\022\025\n\rspaceCons", - "umed\030\005 \002(\004\022\022\n\nspaceQuota\030\006 \002(\004\"7\n\026Corrup" + - "tFileBlocksProto\022\r\n\005files\030\001 \003(\t\022\016\n\006cooki" + - "e\030\002 \002(\t\"!\n\021FsPermissionProto\022\014\n\004perm\030\001 \002" + - "(\r\"\246\001\n\021LocatedBlockProto\022\036\n\001b\030\001 \002(\0132\023.Ex" + - "tendedBlockProto\022\016\n\006offset\030\002 \002(\004\022 \n\004locs" + - "\030\003 \003(\0132\022.DatanodeInfoProto\022\017\n\007corrupt\030\004 " + - "\002(\010\022.\n\nblockToken\030\005 \002(\0132\032.BlockTokenIden" + - "tifierProto\"\253\001\n\022LocatedBlocksProto\022\022\n\nfi" + - "leLength\030\001 \002(\004\022\"\n\006blocks\030\002 \003(\0132\022.Located" + - "BlockProto\022\031\n\021underConstruction\030\003 \002(\010\022%\n", - "\tlastBlock\030\004 \001(\0132\022.LocatedBlockProto\022\033\n\023" + - "isLastBlockComplete\030\005 \002(\010\"\366\002\n\023HdfsFileSt" + - "atusProto\022/\n\010fileType\030\001 \002(\0162\035.HdfsFileSt" + - "atusProto.FileType\022\014\n\004path\030\002 \002(\014\022\016\n\006leng" + - "th\030\003 \002(\004\022&\n\npermission\030\004 \002(\0132\022.FsPermiss" + - "ionProto\022\r\n\005owner\030\005 \002(\t\022\r\n\005group\030\006 \002(\t\022\031" + - "\n\021modification_time\030\007 \002(\004\022\023\n\013access_time" + - "\030\010 \002(\004\022\017\n\007symlink\030\t \001(\014\022\031\n\021block_replica" + - "tion\030\n \001(\r\022\021\n\tblocksize\030\013 \001(\004\022&\n\tlocatio" + - "ns\030\014 \001(\0132\023.LocatedBlocksProto\"3\n\010FileTyp", - "e\022\n\n\006IS_DIR\020\001\022\013\n\007IS_FILE\020\002\022\016\n\nIS_SYMLINK" + - "\020\003\"\212\001\n\025FsServerDefaultsProto\022\021\n\tblockSiz" + - "e\030\001 \002(\004\022\030\n\020bytesPerChecksum\030\002 \002(\r\022\027\n\017wri" + - "tePacketSize\030\003 \002(\r\022\023\n\013replication\030\004 \002(\r\022" + - "\026\n\016fileBufferSize\030\005 \002(\r\"_\n\025DirectoryList" + - "ingProto\022,\n\016partialListing\030\001 \003(\0132\024.HdfsF" + - "ileStatusProto\022\030\n\020remainingEntries\030\002 \002(\r" + - "\"B\n\030UpgradeStatusReportProto\022\017\n\007version\030" + - "\001 \002(\r\022\025\n\rupgradeStatus\030\002 \002(\r\"_\n\020StorageI" + - "nfoProto\022\025\n\rlayoutVersion\030\001 \002(\r\022\022\n\nnames", - "pceID\030\002 \002(\r\022\021\n\tclusterID\030\003 \002(\t\022\r\n\005cTime\030" + - "\004 \002(\004\"\347\001\n\031NamenodeRegistrationProto\022\022\n\nr" + - "pcAddress\030\001 \002(\t\022\023\n\013httpAddress\030\002 \002(\t\022&\n\013" + - "storageInfo\030\003 \002(\0132\021.StorageInfoProto\022:\n\004" + - "role\030\004 \001(\0162,.NamenodeRegistrationProto.N" + - "amenodeRoleProto\"=\n\021NamenodeRoleProto\022\014\n" + - "\010NAMENODE\020\001\022\n\n\006BACKUP\020\002\022\016\n\nCHECKPOINT\020\003\"" + - "\221\001\n\030CheckpointSignatureProto\022\023\n\013blockPoo" + - "lId\030\001 \002(\t\022 \n\030mostRecentCheckpointTxId\030\002 " + - "\002(\004\022\026\n\016curSegmentTxId\030\003 \002(\004\022&\n\013storageIn", - "fo\030\004 \002(\0132\021.StorageInfoProto\"\264\001\n\024Namenode" + - 
"CommandProto\022\016\n\006action\030\001 \002(\r\022(\n\004type\030\002 \002" + - "(\0162\032.NamenodeCommandProto.Type\022.\n\rcheckp" + - "ointCmd\030\003 \001(\0132\027.CheckpointCommandProto\"2" + - "\n\004Type\022\023\n\017NamenodeCommand\020\000\022\025\n\021CheckPoin" + - "tCommand\020\001\"a\n\026CheckpointCommandProto\022,\n\t" + - "signature\030\001 \002(\0132\031.CheckpointSignaturePro" + - "to\022\031\n\021needToReturnImage\030\002 \002(\010\"A\n\nBlockPr" + - "oto\022\017\n\007blockId\030\001 \002(\004\022\020\n\010genStamp\030\002 \002(\004\022\020" + - "\n\010numBytes\030\003 \001(\004\"J\n\027BlockWithLocationsPr", - "oto\022\032\n\005block\030\001 \002(\0132\013.BlockProto\022\023\n\013datan" + - "odeIDs\030\002 \003(\t\"D\n\030BlocksWithLocationsProto" + - "\022(\n\006blocks\030\001 \003(\0132\030.BlockWithLocationsPro" + - "to\"8\n\022RemoteEditLogProto\022\021\n\tstartTxId\030\001 " + - "\002(\004\022\017\n\007endTxId\030\002 \002(\004\"?\n\032RemoteEditLogMan" + - "ifestProto\022!\n\004logs\030\001 \003(\0132\023.RemoteEditLog" + - "Proto\"\203\001\n\022NamespaceInfoProto\022\024\n\014buildVer" + - "sion\030\001 \002(\t\022\032\n\022distUpgradeVersion\030\002 \002(\r\022\023" + - "\n\013blockPoolID\030\003 \002(\t\022&\n\013storageInfo\030\004 \002(\013" + - "2\021.StorageInfoProto\"D\n\rBlockKeyProto\022\r\n\005", - "keyId\030\001 \002(\r\022\022\n\nexpiryDate\030\002 \002(\004\022\020\n\010keyBy" + - "tes\030\003 \002(\014\"\254\001\n\026ExportedBlockKeysProto\022\033\n\023" + - "isBlockTokenEnabled\030\001 \002(\010\022\031\n\021keyUpdateIn" + - "terval\030\002 \002(\004\022\025\n\rtokenLifeTime\030\003 \002(\004\022\"\n\nc" + - "urrentKey\030\004 \002(\0132\016.BlockKeyProto\022\037\n\007allKe" + - "ys\030\005 \003(\0132\016.BlockKeyProto\"N\n\024RecoveringBl" + - "ockProto\022\023\n\013newGenStamp\030\001 \002(\004\022!\n\005block\030\002" + - " \002(\0132\022.LocatedBlockProto\"\025\n\023VersionReque" + - "stProto\"9\n\024VersionResponseProto\022!\n\004info\030" + - "\001 \002(\0132\023.NamespaceInfoProto*L\n\021ReplicaSta", - "teProto\022\r\n\tFINALIZED\020\000\022\007\n\003RBW\020\001\022\007\n\003RWR\020\002" + - "\022\007\n\003RUR\020\003\022\r\n\tTEMPORARY\020\004B6\n%org.apache.h" + - "adoop.hdfs.protocol.protoB\nHdfsProtos\240\001\001" - }; - com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = - new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { - public com.google.protobuf.ExtensionRegistry assignDescriptors( - com.google.protobuf.Descriptors.FileDescriptor root) { - descriptor = root; - internal_static_ExtendedBlockProto_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_ExtendedBlockProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ExtendedBlockProto_descriptor, - new java.lang.String[] { "PoolId", "BlockId", "GenerationStamp", "NumBytes", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder.class); - internal_static_BlockTokenIdentifierProto_descriptor = - getDescriptor().getMessageTypes().get(1); - internal_static_BlockTokenIdentifierProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_BlockTokenIdentifierProto_descriptor, - new java.lang.String[] { "Identifier", "Password", "Kind", "Service", }, - 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder.class); - internal_static_DatanodeIDProto_descriptor = - getDescriptor().getMessageTypes().get(2); - internal_static_DatanodeIDProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_DatanodeIDProto_descriptor, - new java.lang.String[] { "Name", "StorageID", "InfoPort", "IpcPort", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder.class); - internal_static_DatanodeInfosProto_descriptor = - getDescriptor().getMessageTypes().get(3); - internal_static_DatanodeInfosProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_DatanodeInfosProto_descriptor, - new java.lang.String[] { "Datanodes", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder.class); - internal_static_DatanodeInfoProto_descriptor = - getDescriptor().getMessageTypes().get(4); - internal_static_DatanodeInfoProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_DatanodeInfoProto_descriptor, - new java.lang.String[] { "Id", "Capacity", "DfsUsed", "Remaining", "BlockPoolUsed", "LastUpdate", "XceiverCount", "Location", "HostName", "AdminState", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder.class); - internal_static_ContentSummaryProto_descriptor = - getDescriptor().getMessageTypes().get(5); - internal_static_ContentSummaryProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ContentSummaryProto_descriptor, - new java.lang.String[] { "Length", "FileCount", "DirectoryCount", "Quota", "SpaceConsumed", "SpaceQuota", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder.class); - internal_static_CorruptFileBlocksProto_descriptor = - getDescriptor().getMessageTypes().get(6); - internal_static_CorruptFileBlocksProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_CorruptFileBlocksProto_descriptor, - new java.lang.String[] { "Files", "Cookie", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder.class); - internal_static_FsPermissionProto_descriptor = - getDescriptor().getMessageTypes().get(7); - internal_static_FsPermissionProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_FsPermissionProto_descriptor, - new java.lang.String[] { "Perm", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder.class); - internal_static_LocatedBlockProto_descriptor = - getDescriptor().getMessageTypes().get(8); - internal_static_LocatedBlockProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_LocatedBlockProto_descriptor, - new java.lang.String[] { "B", "Offset", "Locs", "Corrupt", "BlockToken", }, - 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder.class); - internal_static_LocatedBlocksProto_descriptor = - getDescriptor().getMessageTypes().get(9); - internal_static_LocatedBlocksProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_LocatedBlocksProto_descriptor, - new java.lang.String[] { "FileLength", "Blocks", "UnderConstruction", "LastBlock", "IsLastBlockComplete", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder.class); - internal_static_HdfsFileStatusProto_descriptor = - getDescriptor().getMessageTypes().get(10); - internal_static_HdfsFileStatusProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_HdfsFileStatusProto_descriptor, - new java.lang.String[] { "FileType", "Path", "Length", "Permission", "Owner", "Group", "ModificationTime", "AccessTime", "Symlink", "BlockReplication", "Blocksize", "Locations", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder.class); - internal_static_FsServerDefaultsProto_descriptor = - getDescriptor().getMessageTypes().get(11); - internal_static_FsServerDefaultsProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_FsServerDefaultsProto_descriptor, - new java.lang.String[] { "BlockSize", "BytesPerChecksum", "WritePacketSize", "Replication", "FileBufferSize", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder.class); - internal_static_DirectoryListingProto_descriptor = - getDescriptor().getMessageTypes().get(12); - internal_static_DirectoryListingProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_DirectoryListingProto_descriptor, - new java.lang.String[] { "PartialListing", "RemainingEntries", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder.class); - internal_static_UpgradeStatusReportProto_descriptor = - getDescriptor().getMessageTypes().get(13); - internal_static_UpgradeStatusReportProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_UpgradeStatusReportProto_descriptor, - new java.lang.String[] { "Version", "UpgradeStatus", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.Builder.class); - internal_static_StorageInfoProto_descriptor = - getDescriptor().getMessageTypes().get(14); - internal_static_StorageInfoProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_StorageInfoProto_descriptor, - new java.lang.String[] { "LayoutVersion", "NamespceID", "ClusterID", "CTime", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder.class); - internal_static_NamenodeRegistrationProto_descriptor = - getDescriptor().getMessageTypes().get(15); - 
internal_static_NamenodeRegistrationProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_NamenodeRegistrationProto_descriptor, - new java.lang.String[] { "RpcAddress", "HttpAddress", "StorageInfo", "Role", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder.class); - internal_static_CheckpointSignatureProto_descriptor = - getDescriptor().getMessageTypes().get(16); - internal_static_CheckpointSignatureProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_CheckpointSignatureProto_descriptor, - new java.lang.String[] { "BlockPoolId", "MostRecentCheckpointTxId", "CurSegmentTxId", "StorageInfo", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder.class); - internal_static_NamenodeCommandProto_descriptor = - getDescriptor().getMessageTypes().get(17); - internal_static_NamenodeCommandProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_NamenodeCommandProto_descriptor, - new java.lang.String[] { "Action", "Type", "CheckpointCmd", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Builder.class); - internal_static_CheckpointCommandProto_descriptor = - getDescriptor().getMessageTypes().get(18); - internal_static_CheckpointCommandProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_CheckpointCommandProto_descriptor, - new java.lang.String[] { "Signature", "NeedToReturnImage", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.Builder.class); - internal_static_BlockProto_descriptor = - getDescriptor().getMessageTypes().get(19); - internal_static_BlockProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_BlockProto_descriptor, - new java.lang.String[] { "BlockId", "GenStamp", "NumBytes", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder.class); - internal_static_BlockWithLocationsProto_descriptor = - getDescriptor().getMessageTypes().get(20); - internal_static_BlockWithLocationsProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_BlockWithLocationsProto_descriptor, - new java.lang.String[] { "Block", "DatanodeIDs", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder.class); - internal_static_BlocksWithLocationsProto_descriptor = - getDescriptor().getMessageTypes().get(21); - internal_static_BlocksWithLocationsProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_BlocksWithLocationsProto_descriptor, - new java.lang.String[] { "Blocks", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.Builder.class); - internal_static_RemoteEditLogProto_descriptor = - 
getDescriptor().getMessageTypes().get(22); - internal_static_RemoteEditLogProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RemoteEditLogProto_descriptor, - new java.lang.String[] { "StartTxId", "EndTxId", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder.class); - internal_static_RemoteEditLogManifestProto_descriptor = - getDescriptor().getMessageTypes().get(23); - internal_static_RemoteEditLogManifestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RemoteEditLogManifestProto_descriptor, - new java.lang.String[] { "Logs", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder.class); - internal_static_NamespaceInfoProto_descriptor = - getDescriptor().getMessageTypes().get(24); - internal_static_NamespaceInfoProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_NamespaceInfoProto_descriptor, - new java.lang.String[] { "BuildVersion", "DistUpgradeVersion", "BlockPoolID", "StorageInfo", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder.class); - internal_static_BlockKeyProto_descriptor = - getDescriptor().getMessageTypes().get(25); - internal_static_BlockKeyProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_BlockKeyProto_descriptor, - new java.lang.String[] { "KeyId", "ExpiryDate", "KeyBytes", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder.class); - internal_static_ExportedBlockKeysProto_descriptor = - getDescriptor().getMessageTypes().get(26); - internal_static_ExportedBlockKeysProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ExportedBlockKeysProto_descriptor, - new java.lang.String[] { "IsBlockTokenEnabled", "KeyUpdateInterval", "TokenLifeTime", "CurrentKey", "AllKeys", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder.class); - internal_static_RecoveringBlockProto_descriptor = - getDescriptor().getMessageTypes().get(27); - internal_static_RecoveringBlockProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RecoveringBlockProto_descriptor, - new java.lang.String[] { "NewGenStamp", "Block", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder.class); - internal_static_VersionRequestProto_descriptor = - getDescriptor().getMessageTypes().get(28); - internal_static_VersionRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_VersionRequestProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto.Builder.class); - internal_static_VersionResponseProto_descriptor = - getDescriptor().getMessageTypes().get(29); - 
internal_static_VersionResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_VersionResponseProto_descriptor, - new java.lang.String[] { "Info", }, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.Builder.class); - return null; - } - }; - com.google.protobuf.Descriptors.FileDescriptor - .internalBuildGeneratedFileFrom(descriptorData, - new com.google.protobuf.Descriptors.FileDescriptor[] { - }, assigner); - } - - // @@protoc_insertion_point(outer_class_scope) -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/InterDatanodeProtocolProtos.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/InterDatanodeProtocolProtos.java deleted file mode 100644 index 70a01a687a3..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/InterDatanodeProtocolProtos.java +++ /dev/null @@ -1,2517 +0,0 @@ -// Generated by the protocol buffer compiler. DO NOT EDIT! -// source: InterDatanodeProtocol.proto - -package org.apache.hadoop.hdfs.protocol.proto; - -public final class InterDatanodeProtocolProtos { - private InterDatanodeProtocolProtos() {} - public static void registerAllExtensions( - com.google.protobuf.ExtensionRegistry registry) { - } - public interface InitReplicaRecoveryRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .RecoveringBlockProto block = 1; - boolean hasBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto getBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder getBlockOrBuilder(); - } - public static final class InitReplicaRecoveryRequestProto extends - com.google.protobuf.GeneratedMessage - implements InitReplicaRecoveryRequestProtoOrBuilder { - // Use InitReplicaRecoveryRequestProto.newBuilder() to construct. 
- private InitReplicaRecoveryRequestProto(Builder builder) { - super(builder); - } - private InitReplicaRecoveryRequestProto(boolean noInit) {} - - private static final InitReplicaRecoveryRequestProto defaultInstance; - public static InitReplicaRecoveryRequestProto getDefaultInstance() { - return defaultInstance; - } - - public InitReplicaRecoveryRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_InitReplicaRecoveryRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_InitReplicaRecoveryRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required .RecoveringBlockProto block = 1; - public static final int BLOCK_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto block_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto getBlock() { - return block_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder getBlockOrBuilder() { - return block_; - } - - private void initFields() { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBlock()) { - memoizedIsInitialized = 0; - return false; - } - if (!getBlock().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, block_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, block_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto) obj; - - boolean result = true; - result = result && (hasBlock() == other.hasBlock()); - if (hasBlock()) { - result = result && getBlock() - .equals(other.getBlock()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); 
- return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBlock()) { - hash = (37 * hash) + BLOCK_FIELD_NUMBER; - hash = (53 * hash) + getBlock().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { 
return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_InitReplicaRecoveryRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_InitReplicaRecoveryRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBlockFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto buildPartial() { - 
org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (blockBuilder_ == null) { - result.block_ = block_; - } else { - result.block_ = blockBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.getDefaultInstance()) return this; - if (other.hasBlock()) { - mergeBlock(other.getBlock()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBlock()) { - - return false; - } - if (!getBlock().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.newBuilder(); - if (hasBlock()) { - subBuilder.mergeFrom(getBlock()); - } - input.readMessage(subBuilder, extensionRegistry); - setBlock(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .RecoveringBlockProto block = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder> blockBuilder_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto getBlock() { - if (blockBuilder_ == null) { - return block_; - } else { - return blockBuilder_.getMessage(); - } - } - public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto value) { - if (blockBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - 
} - block_ = value; - onChanged(); - } else { - blockBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setBlock( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder builderForValue) { - if (blockBuilder_ == null) { - block_ = builderForValue.build(); - onChanged(); - } else { - blockBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto value) { - if (blockBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance()) { - block_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); - } else { - block_ = value; - } - onChanged(); - } else { - blockBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearBlock() { - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance(); - onChanged(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder getBlockBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getBlockFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder getBlockOrBuilder() { - if (blockBuilder_ != null) { - return blockBuilder_.getMessageOrBuilder(); - } else { - return block_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder> - getBlockFieldBuilder() { - if (blockBuilder_ == null) { - blockBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder>( - block_, - getParentForChildren(), - isClean()); - block_ = null; - } - return blockBuilder_; - } - - // @@protoc_insertion_point(builder_scope:InitReplicaRecoveryRequestProto) - } - - static { - defaultInstance = new InitReplicaRecoveryRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:InitReplicaRecoveryRequestProto) - } - - public interface InitReplicaRecoveryResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .ReplicaStateProto state = 1; - boolean hasState(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto getState(); - - // required .BlockProto block = 2; - boolean hasBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder(); - } - public static final class InitReplicaRecoveryResponseProto extends - com.google.protobuf.GeneratedMessage - implements InitReplicaRecoveryResponseProtoOrBuilder { - // Use InitReplicaRecoveryResponseProto.newBuilder() to construct. 
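// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the patch] Per the OrBuilder comments above,
// the response carries a required ReplicaStateProto (tag 1; the generated
// default is FINALIZED) plus a required BlockProto (tag 2). A hypothetical
// consumer, assuming "in" is a java.io.InputStream holding one
// length-delimited message:
//
//   InitReplicaRecoveryResponseProto resp =
//       InitReplicaRecoveryResponseProto.parseDelimitedFrom(in);
//   if (resp != null) {  // parseDelimitedFrom returns null at end of stream
//     HdfsProtos.ReplicaStateProto state = resp.getState();
//     HdfsProtos.BlockProto block = resp.getBlock();
//   }
// ---------------------------------------------------------------------------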
- private InitReplicaRecoveryResponseProto(Builder builder) { - super(builder); - } - private InitReplicaRecoveryResponseProto(boolean noInit) {} - - private static final InitReplicaRecoveryResponseProto defaultInstance; - public static InitReplicaRecoveryResponseProto getDefaultInstance() { - return defaultInstance; - } - - public InitReplicaRecoveryResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_InitReplicaRecoveryResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_InitReplicaRecoveryResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required .ReplicaStateProto state = 1; - public static final int STATE_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto state_; - public boolean hasState() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto getState() { - return state_; - } - - // required .BlockProto block = 2; - public static final int BLOCK_FIELD_NUMBER = 2; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock() { - return block_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder() { - return block_; - } - - private void initFields() { - state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto.FINALIZED; - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasState()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasBlock()) { - memoizedIsInitialized = 0; - return false; - } - if (!getBlock().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(1, state_.getNumber()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, block_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, state_.getNumber()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, block_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return 
super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto) obj; - - boolean result = true; - result = result && (hasState() == other.hasState()); - if (hasState()) { - result = result && - (getState() == other.getState()); - } - result = result && (hasBlock() == other.hasBlock()); - if (hasBlock()) { - result = result && getBlock() - .equals(other.getBlock()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasState()) { - hash = (37 * hash) + STATE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getState()); - } - if (hasBlock()) { - hash = (37 * hash) + BLOCK_FIELD_NUMBER; - hash = (53 * hash) + getBlock().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static 
org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_InitReplicaRecoveryResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_InitReplicaRecoveryResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBlockFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto.FINALIZED; - bitField0_ = (bitField0_ & ~0x00000001); - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.getDescriptor(); - } - - public 
org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.state_ = state_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - if (blockBuilder_ == null) { - result.block_ = block_; - } else { - result.block_ = blockBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.getDefaultInstance()) return this; - if (other.hasState()) { - setState(other.getState()); - } - if (other.hasBlock()) { - mergeBlock(other.getBlock()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasState()) { - - return false; - } - if (!hasBlock()) { - - return false; - } - if (!getBlock().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); 
- return this; - } - break; - } - case 8: { - int rawValue = input.readEnum(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - bitField0_ |= 0x00000001; - state_ = value; - } - break; - } - case 18: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(); - if (hasBlock()) { - subBuilder.mergeFrom(getBlock()); - } - input.readMessage(subBuilder, extensionRegistry); - setBlock(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .ReplicaStateProto state = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto.FINALIZED; - public boolean hasState() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto getState() { - return state_; - } - public Builder setState(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - state_ = value; - onChanged(); - return this; - } - public Builder clearState() { - bitField0_ = (bitField0_ & ~0x00000001); - state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto.FINALIZED; - onChanged(); - return this; - } - - // required .BlockProto block = 2; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> blockBuilder_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock() { - if (blockBuilder_ == null) { - return block_; - } else { - return blockBuilder_.getMessage(); - } - } - public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) { - if (blockBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - block_ = value; - onChanged(); - } else { - blockBuilder_.setMessage(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder setBlock( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) { - if (blockBuilder_ == null) { - block_ = builderForValue.build(); - onChanged(); - } else { - blockBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) { - if (blockBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && - block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) { - block_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); - } else { - block_ = value; - } - onChanged(); - } else { - blockBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder clearBlock() 
{ - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); - onChanged(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getBlockBuilder() { - bitField0_ |= 0x00000002; - onChanged(); - return getBlockFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder() { - if (blockBuilder_ != null) { - return blockBuilder_.getMessageOrBuilder(); - } else { - return block_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> - getBlockFieldBuilder() { - if (blockBuilder_ == null) { - blockBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>( - block_, - getParentForChildren(), - isClean()); - block_ = null; - } - return blockBuilder_; - } - - // @@protoc_insertion_point(builder_scope:InitReplicaRecoveryResponseProto) - } - - static { - defaultInstance = new InitReplicaRecoveryResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:InitReplicaRecoveryResponseProto) - } - - public interface UpdateReplicaUnderRecoveryRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .ExtendedBlockProto block = 1; - boolean hasBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder(); - - // required uint64 recoveryId = 2; - boolean hasRecoveryId(); - long getRecoveryId(); - - // required uint64 newLength = 3; - boolean hasNewLength(); - long getNewLength(); - } - public static final class UpdateReplicaUnderRecoveryRequestProto extends - com.google.protobuf.GeneratedMessage - implements UpdateReplicaUnderRecoveryRequestProtoOrBuilder { - // Use UpdateReplicaUnderRecoveryRequestProto.newBuilder() to construct. 
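// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the patch] The update request pairs the
// block under recovery with a recovery id and the replica's new length,
// matching the three required fields declared above. Field values here are
// hypothetical; "extBlock" is an assumed HdfsProtos.ExtendedBlockProto:
//
//   UpdateReplicaUnderRecoveryRequestProto update =
//       UpdateReplicaUnderRecoveryRequestProto.newBuilder()
//           .setBlock(extBlock)    // required .ExtendedBlockProto block = 1
//           .setRecoveryId(1001L)  // required uint64 recoveryId = 2
//           .setNewLength(4096L)   // required uint64 newLength = 3
//           .build();
// ---------------------------------------------------------------------------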
- private UpdateReplicaUnderRecoveryRequestProto(Builder builder) { - super(builder); - } - private UpdateReplicaUnderRecoveryRequestProto(boolean noInit) {} - - private static final UpdateReplicaUnderRecoveryRequestProto defaultInstance; - public static UpdateReplicaUnderRecoveryRequestProto getDefaultInstance() { - return defaultInstance; - } - - public UpdateReplicaUnderRecoveryRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_UpdateReplicaUnderRecoveryRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_UpdateReplicaUnderRecoveryRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required .ExtendedBlockProto block = 1; - public static final int BLOCK_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() { - return block_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() { - return block_; - } - - // required uint64 recoveryId = 2; - public static final int RECOVERYID_FIELD_NUMBER = 2; - private long recoveryId_; - public boolean hasRecoveryId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getRecoveryId() { - return recoveryId_; - } - - // required uint64 newLength = 3; - public static final int NEWLENGTH_FIELD_NUMBER = 3; - private long newLength_; - public boolean hasNewLength() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getNewLength() { - return newLength_; - } - - private void initFields() { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - recoveryId_ = 0L; - newLength_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBlock()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasRecoveryId()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasNewLength()) { - memoizedIsInitialized = 0; - return false; - } - if (!getBlock().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, block_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, recoveryId_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt64(3, newLength_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, block_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += 
com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, recoveryId_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(3, newLength_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto) obj; - - boolean result = true; - result = result && (hasBlock() == other.hasBlock()); - if (hasBlock()) { - result = result && getBlock() - .equals(other.getBlock()); - } - result = result && (hasRecoveryId() == other.hasRecoveryId()); - if (hasRecoveryId()) { - result = result && (getRecoveryId() - == other.getRecoveryId()); - } - result = result && (hasNewLength() == other.hasNewLength()); - if (hasNewLength()) { - result = result && (getNewLength() - == other.getNewLength()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBlock()) { - hash = (37 * hash) + BLOCK_FIELD_NUMBER; - hash = (53 * hash) + getBlock().hashCode(); - } - if (hasRecoveryId()) { - hash = (37 * hash) + RECOVERYID_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getRecoveryId()); - } - if (hasNewLength()) { - hash = (37 * hash) + NEWLENGTH_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getNewLength()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_UpdateReplicaUnderRecoveryRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_UpdateReplicaUnderRecoveryRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - 
private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBlockFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - recoveryId_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - newLength_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (blockBuilder_ == null) { - result.block_ = block_; - } else { - result.block_ = blockBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.recoveryId_ = recoveryId_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.newLength_ = newLength_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto other) { - if (other == 
org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto.getDefaultInstance()) return this; - if (other.hasBlock()) { - mergeBlock(other.getBlock()); - } - if (other.hasRecoveryId()) { - setRecoveryId(other.getRecoveryId()); - } - if (other.hasNewLength()) { - setNewLength(other.getNewLength()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBlock()) { - - return false; - } - if (!hasRecoveryId()) { - - return false; - } - if (!hasNewLength()) { - - return false; - } - if (!getBlock().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(); - if (hasBlock()) { - subBuilder.mergeFrom(getBlock()); - } - input.readMessage(subBuilder, extensionRegistry); - setBlock(subBuilder.buildPartial()); - break; - } - case 16: { - bitField0_ |= 0x00000002; - recoveryId_ = input.readUInt64(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - newLength_ = input.readUInt64(); - break; - } - } - } - } - - private int bitField0_; - - // required .ExtendedBlockProto block = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() { - if (blockBuilder_ == null) { - return block_; - } else { - return blockBuilder_.getMessage(); - } - } - public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { - if (blockBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - block_ = value; - onChanged(); - } else { - blockBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setBlock( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) { - if (blockBuilder_ == null) { - block_ = builderForValue.build(); - onChanged(); - } else { - blockBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { - if (blockBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - block_ != 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) { - block_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); - } else { - block_ = value; - } - onChanged(); - } else { - blockBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearBlock() { - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - onChanged(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getBlockFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() { - if (blockBuilder_ != null) { - return blockBuilder_.getMessageOrBuilder(); - } else { - return block_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> - getBlockFieldBuilder() { - if (blockBuilder_ == null) { - blockBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>( - block_, - getParentForChildren(), - isClean()); - block_ = null; - } - return blockBuilder_; - } - - // required uint64 recoveryId = 2; - private long recoveryId_ ; - public boolean hasRecoveryId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getRecoveryId() { - return recoveryId_; - } - public Builder setRecoveryId(long value) { - bitField0_ |= 0x00000002; - recoveryId_ = value; - onChanged(); - return this; - } - public Builder clearRecoveryId() { - bitField0_ = (bitField0_ & ~0x00000002); - recoveryId_ = 0L; - onChanged(); - return this; - } - - // required uint64 newLength = 3; - private long newLength_ ; - public boolean hasNewLength() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getNewLength() { - return newLength_; - } - public Builder setNewLength(long value) { - bitField0_ |= 0x00000004; - newLength_ = value; - onChanged(); - return this; - } - public Builder clearNewLength() { - bitField0_ = (bitField0_ & ~0x00000004); - newLength_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:UpdateReplicaUnderRecoveryRequestProto) - } - - static { - defaultInstance = new UpdateReplicaUnderRecoveryRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:UpdateReplicaUnderRecoveryRequestProto) - } - - public interface UpdateReplicaUnderRecoveryResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .ExtendedBlockProto block = 1; - boolean hasBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder(); - } - public static final class UpdateReplicaUnderRecoveryResponseProto extends - com.google.protobuf.GeneratedMessage - implements 
UpdateReplicaUnderRecoveryResponseProtoOrBuilder { - // Use UpdateReplicaUnderRecoveryResponseProto.newBuilder() to construct. - private UpdateReplicaUnderRecoveryResponseProto(Builder builder) { - super(builder); - } - private UpdateReplicaUnderRecoveryResponseProto(boolean noInit) {} - - private static final UpdateReplicaUnderRecoveryResponseProto defaultInstance; - public static UpdateReplicaUnderRecoveryResponseProto getDefaultInstance() { - return defaultInstance; - } - - public UpdateReplicaUnderRecoveryResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_UpdateReplicaUnderRecoveryResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_UpdateReplicaUnderRecoveryResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required .ExtendedBlockProto block = 1; - public static final int BLOCK_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() { - return block_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() { - return block_; - } - - private void initFields() { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBlock()) { - memoizedIsInitialized = 0; - return false; - } - if (!getBlock().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, block_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, block_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto) obj; - - boolean result = true; - result = result 
&& (hasBlock() == other.hasBlock()); - if (hasBlock()) { - result = result && getBlock() - .equals(other.getBlock()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBlock()) { - hash = (37 * hash) + BLOCK_FIELD_NUMBER; - hash = (53 * hash) + getBlock().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_UpdateReplicaUnderRecoveryResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_UpdateReplicaUnderRecoveryResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBlockFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto buildParsed() - throws 
com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (blockBuilder_ == null) { - result.block_ = block_; - } else { - result.block_ = blockBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.getDefaultInstance()) return this; - if (other.hasBlock()) { - mergeBlock(other.getBlock()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBlock()) { - - return false; - } - if (!getBlock().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(); - if (hasBlock()) { - subBuilder.mergeFrom(getBlock()); - } - input.readMessage(subBuilder, extensionRegistry); - setBlock(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .ExtendedBlockProto block = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_; - public boolean hasBlock() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() { - if (blockBuilder_ == null) { - return block_; - } else { - return blockBuilder_.getMessage(); - } - } - public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { - if (blockBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - block_ = value; - onChanged(); - } else { - blockBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setBlock( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) { - if (blockBuilder_ == null) { - block_ = builderForValue.build(); - onChanged(); - } else { - blockBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { - if (blockBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) { - block_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); - } else { - block_ = value; - } - onChanged(); - } else { - blockBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearBlock() { - if (blockBuilder_ == null) { - block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); - onChanged(); - } else { - blockBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getBlockFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() { - if (blockBuilder_ != null) { - return blockBuilder_.getMessageOrBuilder(); - } else { - return block_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> - getBlockFieldBuilder() { - if (blockBuilder_ == null) { - blockBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>( - block_, - getParentForChildren(), - isClean()); - block_ = null; - } - return blockBuilder_; - } - - // @@protoc_insertion_point(builder_scope:UpdateReplicaUnderRecoveryResponseProto) - } - - static { - defaultInstance = new UpdateReplicaUnderRecoveryResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:UpdateReplicaUnderRecoveryResponseProto) - } - - public static abstract class InterDatanodeProtocolService - implements com.google.protobuf.Service { - protected InterDatanodeProtocolService() {} - - public interface Interface { - public abstract void initReplicaRecovery( - 
com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void updateReplicaUnderRecovery( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto request, - com.google.protobuf.RpcCallback done); - - } - - public static com.google.protobuf.Service newReflectiveService( - final Interface impl) { - return new InterDatanodeProtocolService() { - @java.lang.Override - public void initReplicaRecovery( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.initReplicaRecovery(controller, request, done); - } - - @java.lang.Override - public void updateReplicaUnderRecovery( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.updateReplicaUnderRecovery(controller, request, done); - } - - }; - } - - public static com.google.protobuf.BlockingService - newReflectiveBlockingService(final BlockingInterface impl) { - return new com.google.protobuf.BlockingService() { - public final com.google.protobuf.Descriptors.ServiceDescriptor - getDescriptorForType() { - return getDescriptor(); - } - - public final com.google.protobuf.Message callBlockingMethod( - com.google.protobuf.Descriptors.MethodDescriptor method, - com.google.protobuf.RpcController controller, - com.google.protobuf.Message request) - throws com.google.protobuf.ServiceException { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.callBlockingMethod() given method descriptor for " + - "wrong service type."); - } - switch(method.getIndex()) { - case 0: - return impl.initReplicaRecovery(controller, (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto)request); - case 1: - return impl.updateReplicaUnderRecovery(controller, (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto)request); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public final com.google.protobuf.Message - getRequestPrototype( - com.google.protobuf.Descriptors.MethodDescriptor method) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.getRequestPrototype() given method " + - "descriptor for wrong service type."); - } - switch(method.getIndex()) { - case 0: - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.getDefaultInstance(); - case 1: - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto.getDefaultInstance(); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public final com.google.protobuf.Message - getResponsePrototype( - com.google.protobuf.Descriptors.MethodDescriptor method) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.getResponsePrototype() given method " + - "descriptor for wrong service type."); - } - switch(method.getIndex()) { - case 0: - return 
org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.getDefaultInstance(); - case 1: - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.getDefaultInstance(); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - }; - } - - public abstract void initReplicaRecovery( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void updateReplicaUnderRecovery( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto request, - com.google.protobuf.RpcCallback done); - - public static final - com.google.protobuf.Descriptors.ServiceDescriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.getDescriptor().getServices().get(0); - } - public final com.google.protobuf.Descriptors.ServiceDescriptor - getDescriptorForType() { - return getDescriptor(); - } - - public final void callMethod( - com.google.protobuf.Descriptors.MethodDescriptor method, - com.google.protobuf.RpcController controller, - com.google.protobuf.Message request, - com.google.protobuf.RpcCallback< - com.google.protobuf.Message> done) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.callMethod() given method descriptor for wrong " + - "service type."); - } - switch(method.getIndex()) { - case 0: - this.initReplicaRecovery(controller, (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 1: - this.updateReplicaUnderRecovery(controller, (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public final com.google.protobuf.Message - getRequestPrototype( - com.google.protobuf.Descriptors.MethodDescriptor method) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.getRequestPrototype() given method " + - "descriptor for wrong service type."); - } - switch(method.getIndex()) { - case 0: - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.getDefaultInstance(); - case 1: - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto.getDefaultInstance(); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public final com.google.protobuf.Message - getResponsePrototype( - com.google.protobuf.Descriptors.MethodDescriptor method) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.getResponsePrototype() given method " + - "descriptor for wrong service type."); - } - switch(method.getIndex()) { - case 0: - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.getDefaultInstance(); - case 1: - return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.getDefaultInstance(); - 
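// A minimal server-side sketch, not part of the generated file: SketchImpl and
// its method bodies are hypothetical, while every protobuf call is the
// generated API defined in this class. It shows how a BlockingInterface
// implementation is wrapped by newReflectiveBlockingService(), whose
// callBlockingMethod() dispatches on the method index just like the switch
// statements above.

import com.google.protobuf.BlockingService;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InterDatanodeProtocolService;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto;

final class InterDatanodeServiceSketch {
  static final class Impl implements InterDatanodeProtocolService.BlockingInterface {
    public InitReplicaRecoveryResponseProto initReplicaRecovery(
        RpcController controller, InitReplicaRecoveryRequestProto request)
        throws ServiceException {
      // Out of scope for this sketch; a real datanode would consult its replica map.
      throw new ServiceException("initReplicaRecovery not implemented in this sketch");
    }
    public UpdateReplicaUnderRecoveryResponseProto updateReplicaUnderRecovery(
        RpcController controller, UpdateReplicaUnderRecoveryRequestProto request)
        throws ServiceException {
      // Echo the block back; a real datanode would first truncate the replica
      // to request.getNewLength() under request.getRecoveryId().
      return UpdateReplicaUnderRecoveryResponseProto.newBuilder()
          .setBlock(request.getBlock())
          .build();
    }
  }

  // The RPC layer can now hand callBlockingMethod() a MethodDescriptor plus an
  // untyped request Message and receive the typed response built above.
  static BlockingService export() {
    return InterDatanodeProtocolService.newReflectiveBlockingService(new Impl());
  }
}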
default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public static Stub newStub( - com.google.protobuf.RpcChannel channel) { - return new Stub(channel); - } - - public static final class Stub extends org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InterDatanodeProtocolService implements Interface { - private Stub(com.google.protobuf.RpcChannel channel) { - this.channel = channel; - } - - private final com.google.protobuf.RpcChannel channel; - - public com.google.protobuf.RpcChannel getChannel() { - return channel; - } - - public void initReplicaRecovery( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(0), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.getDefaultInstance())); - } - - public void updateReplicaUnderRecovery( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(1), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.getDefaultInstance())); - } - } - - public static BlockingInterface newBlockingStub( - com.google.protobuf.BlockingRpcChannel channel) { - return new BlockingStub(channel); - } - - public interface BlockingInterface { - public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto initReplicaRecovery( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto updateReplicaUnderRecovery( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto request) - throws com.google.protobuf.ServiceException; - } - - private static final class BlockingStub implements BlockingInterface { - private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { - this.channel = channel; - } - - private final com.google.protobuf.BlockingRpcChannel channel; - - public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto initReplicaRecovery( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto request) - throws com.google.protobuf.ServiceException { - return 
(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(0), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto updateReplicaUnderRecovery( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(1), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.getDefaultInstance()); - } - - } - } - - private static com.google.protobuf.Descriptors.Descriptor - internal_static_InitReplicaRecoveryRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_InitReplicaRecoveryRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_InitReplicaRecoveryResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_InitReplicaRecoveryResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_UpdateReplicaUnderRecoveryRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_UpdateReplicaUnderRecoveryRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_UpdateReplicaUnderRecoveryResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_UpdateReplicaUnderRecoveryResponseProto_fieldAccessorTable; - - public static com.google.protobuf.Descriptors.FileDescriptor - getDescriptor() { - return descriptor; - } - private static com.google.protobuf.Descriptors.FileDescriptor - descriptor; - static { - java.lang.String[] descriptorData = { - "\n\033InterDatanodeProtocol.proto\032\nhdfs.prot" + - "o\"G\n\037InitReplicaRecoveryRequestProto\022$\n\005" + - "block\030\001 \002(\0132\025.RecoveringBlockProto\"a\n In" + - "itReplicaRecoveryResponseProto\022!\n\005state\030" + - "\001 \002(\0162\022.ReplicaStateProto\022\032\n\005block\030\002 \002(\013" + - "2\013.BlockProto\"s\n&UpdateReplicaUnderRecov" + - "eryRequestProto\022\"\n\005block\030\001 \002(\0132\023.Extende" + - "dBlockProto\022\022\n\nrecoveryId\030\002 \002(\004\022\021\n\tnewLe" + - "ngth\030\003 \002(\004\"M\n\'UpdateReplicaUnderRecovery" + - "ResponseProto\022\"\n\005block\030\001 \002(\0132\023.ExtendedB", - "lockProto2\353\001\n\034InterDatanodeProtocolServi" + - "ce\022Z\n\023initReplicaRecovery\022 .InitReplicaR" + - "ecoveryRequestProto\032!.InitReplicaRecover" + - "yResponseProto\022o\n\032updateReplicaUnderReco" + - "very\022\'.UpdateReplicaUnderRecoveryRequest" + - "Proto\032(.UpdateReplicaUnderRecoveryRespon" + - "seProtoBJ\n%org.apache.hadoop.hdfs.protoc" + - "ol.protoB\033InterDatanodeProtocolProtos\210\001\001" + - "\240\001\001" - }; - com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = 
- new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { - public com.google.protobuf.ExtensionRegistry assignDescriptors( - com.google.protobuf.Descriptors.FileDescriptor root) { - descriptor = root; - internal_static_InitReplicaRecoveryRequestProto_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_InitReplicaRecoveryRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_InitReplicaRecoveryRequestProto_descriptor, - new java.lang.String[] { "Block", }, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.Builder.class); - internal_static_InitReplicaRecoveryResponseProto_descriptor = - getDescriptor().getMessageTypes().get(1); - internal_static_InitReplicaRecoveryResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_InitReplicaRecoveryResponseProto_descriptor, - new java.lang.String[] { "State", "Block", }, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.Builder.class); - internal_static_UpdateReplicaUnderRecoveryRequestProto_descriptor = - getDescriptor().getMessageTypes().get(2); - internal_static_UpdateReplicaUnderRecoveryRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_UpdateReplicaUnderRecoveryRequestProto_descriptor, - new java.lang.String[] { "Block", "RecoveryId", "NewLength", }, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto.Builder.class); - internal_static_UpdateReplicaUnderRecoveryResponseProto_descriptor = - getDescriptor().getMessageTypes().get(3); - internal_static_UpdateReplicaUnderRecoveryResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_UpdateReplicaUnderRecoveryResponseProto_descriptor, - new java.lang.String[] { "Block", }, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.Builder.class); - return null; - } - }; - com.google.protobuf.Descriptors.FileDescriptor - .internalBuildGeneratedFileFrom(descriptorData, - new com.google.protobuf.Descriptors.FileDescriptor[] { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(), - }, assigner); - } - - // @@protoc_insertion_point(outer_class_scope) -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/JournalProtocolProtos.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/JournalProtocolProtos.java deleted file mode 100644 index 74267456b73..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/JournalProtocolProtos.java +++ /dev/null @@ -1,2234 +0,0 @@ -// Generated by the protocol buffer compiler. DO NOT EDIT! 
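// A minimal client-side counterpart to the InterDatanodeProtocolService that
// ends just above: the channel parameter is hypothetical (it would come from
// Hadoop's RPC layer), and everything else is the generated stub API.
// newBlockingStub() wraps a BlockingRpcChannel, and each call is marshalled
// through channel.callBlockingMethod() as shown in BlockingStub.

import com.google.protobuf.BlockingRpcChannel;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InterDatanodeProtocolService;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto;

final class InterDatanodeClientSketch {
  // Returns the recovered block reported by the remote datanode.
  static ExtendedBlockProto updateReplica(BlockingRpcChannel channel,
      ExtendedBlockProto block, long recoveryId, long newLength)
      throws ServiceException {
    InterDatanodeProtocolService.BlockingInterface proxy =
        InterDatanodeProtocolService.newBlockingStub(channel);
    UpdateReplicaUnderRecoveryRequestProto request =
        UpdateReplicaUnderRecoveryRequestProto.newBuilder()
            .setBlock(block)           // required field 1 (wire tag 10: 1<<3 | 2)
            .setRecoveryId(recoveryId) // required field 2 (wire tag 16: 2<<3 | 0)
            .setNewLength(newLength)   // required field 3 (wire tag 24: 3<<3 | 0)
            .build(); // throws UninitializedMessageException if a required field is unset
    UpdateReplicaUnderRecoveryResponseProto response =
        proxy.updateReplicaUnderRecovery(null, request);
    return response.getBlock();
  }
}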
-// source: JournalProtocol.proto - -package org.apache.hadoop.hdfs.protocol.proto; - -public final class JournalProtocolProtos { - private JournalProtocolProtos() {} - public static void registerAllExtensions( - com.google.protobuf.ExtensionRegistry registry) { - } - public interface JournalRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .NamenodeRegistrationProto registration = 1; - boolean hasRegistration(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder(); - - // required uint64 firstTxnId = 2; - boolean hasFirstTxnId(); - long getFirstTxnId(); - - // required uint32 numTxns = 3; - boolean hasNumTxns(); - int getNumTxns(); - - // required bytes records = 4; - boolean hasRecords(); - com.google.protobuf.ByteString getRecords(); - } - public static final class JournalRequestProto extends - com.google.protobuf.GeneratedMessage - implements JournalRequestProtoOrBuilder { - // Use JournalRequestProto.newBuilder() to construct. - private JournalRequestProto(Builder builder) { - super(builder); - } - private JournalRequestProto(boolean noInit) {} - - private static final JournalRequestProto defaultInstance; - public static JournalRequestProto getDefaultInstance() { - return defaultInstance; - } - - public JournalRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required .NamenodeRegistrationProto registration = 1; - public static final int REGISTRATION_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_; - public boolean hasRegistration() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { - return registration_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { - return registration_; - } - - // required uint64 firstTxnId = 2; - public static final int FIRSTTXNID_FIELD_NUMBER = 2; - private long firstTxnId_; - public boolean hasFirstTxnId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getFirstTxnId() { - return firstTxnId_; - } - - // required uint32 numTxns = 3; - public static final int NUMTXNS_FIELD_NUMBER = 3; - private int numTxns_; - public boolean hasNumTxns() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public int getNumTxns() { - return numTxns_; - } - - // required bytes records = 4; - public static final int RECORDS_FIELD_NUMBER = 4; - private com.google.protobuf.ByteString records_; - public boolean hasRecords() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public com.google.protobuf.ByteString getRecords() { - return records_; - } - - private void initFields() { - registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - firstTxnId_ = 
0L; - numTxns_ = 0; - records_ = com.google.protobuf.ByteString.EMPTY; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasRegistration()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasFirstTxnId()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasNumTxns()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasRecords()) { - memoizedIsInitialized = 0; - return false; - } - if (!getRegistration().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, registration_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, firstTxnId_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt32(3, numTxns_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeBytes(4, records_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, registration_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, firstTxnId_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(3, numTxns_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(4, records_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto) obj; - - boolean result = true; - result = result && (hasRegistration() == other.hasRegistration()); - if (hasRegistration()) { - result = result && getRegistration() - .equals(other.getRegistration()); - } - result = result && (hasFirstTxnId() == other.hasFirstTxnId()); - if (hasFirstTxnId()) { - result = result && (getFirstTxnId() - == other.getFirstTxnId()); - } - result = result && (hasNumTxns() == other.hasNumTxns()); - if (hasNumTxns()) { - result = result && (getNumTxns() - == other.getNumTxns()); - } - result = result && (hasRecords() == other.hasRecords()); - if (hasRecords()) { - result = result && getRecords() - .equals(other.getRecords()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + 
getDescriptorForType().hashCode(); - if (hasRegistration()) { - hash = (37 * hash) + REGISTRATION_FIELD_NUMBER; - hash = (53 * hash) + getRegistration().hashCode(); - } - if (hasFirstTxnId()) { - hash = (37 * hash) + FIRSTTXNID_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getFirstTxnId()); - } - if (hasNumTxns()) { - hash = (37 * hash) + NUMTXNS_FIELD_NUMBER; - hash = (53 * hash) + getNumTxns(); - } - if (hasRecords()) { - hash = (37 * hash) + RECORDS_FIELD_NUMBER; - hash = (53 * hash) + getRecords().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, 
extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getRegistrationFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (registrationBuilder_ == null) { - registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - } else { - registrationBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - firstTxnId_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - numTxns_ = 0; - bitField0_ = (bitField0_ & ~0x00000004); - records_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000008); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public 
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (registrationBuilder_ == null) { - result.registration_ = registration_; - } else { - result.registration_ = registrationBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.firstTxnId_ = firstTxnId_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.numTxns_ = numTxns_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.records_ = records_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.getDefaultInstance()) return this; - if (other.hasRegistration()) { - mergeRegistration(other.getRegistration()); - } - if (other.hasFirstTxnId()) { - setFirstTxnId(other.getFirstTxnId()); - } - if (other.hasNumTxns()) { - setNumTxns(other.getNumTxns()); - } - if (other.hasRecords()) { - setRecords(other.getRecords()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasRegistration()) { - - return false; - } - if (!hasFirstTxnId()) { - - return false; - } - if (!hasNumTxns()) { - - return false; - } - if (!hasRecords()) { - - return false; - } - if (!getRegistration().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(); - if (hasRegistration()) { - subBuilder.mergeFrom(getRegistration()); - } - input.readMessage(subBuilder, extensionRegistry); - setRegistration(subBuilder.buildPartial()); - break; - } - case 16: { - bitField0_ |= 0x00000002; - firstTxnId_ = input.readUInt64(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - numTxns_ = input.readUInt32(); - break; - } - case 34: { - bitField0_ |= 0x00000008; - records_ = 
input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required .NamenodeRegistrationProto registration = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> registrationBuilder_; - public boolean hasRegistration() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { - if (registrationBuilder_ == null) { - return registration_; - } else { - return registrationBuilder_.getMessage(); - } - } - public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { - if (registrationBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - registration_ = value; - onChanged(); - } else { - registrationBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setRegistration( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder builderForValue) { - if (registrationBuilder_ == null) { - registration_ = builderForValue.build(); - onChanged(); - } else { - registrationBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { - if (registrationBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - registration_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance()) { - registration_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial(); - } else { - registration_ = value; - } - onChanged(); - } else { - registrationBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearRegistration() { - if (registrationBuilder_ == null) { - registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - onChanged(); - } else { - registrationBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder getRegistrationBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getRegistrationFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { - if (registrationBuilder_ != null) { - return registrationBuilder_.getMessageOrBuilder(); - } else { - return registration_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> - getRegistrationFieldBuilder() { - if (registrationBuilder_ == null) { - registrationBuilder_ = new 
com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder>( - registration_, - getParentForChildren(), - isClean()); - registration_ = null; - } - return registrationBuilder_; - } - - // required uint64 firstTxnId = 2; - private long firstTxnId_ ; - public boolean hasFirstTxnId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getFirstTxnId() { - return firstTxnId_; - } - public Builder setFirstTxnId(long value) { - bitField0_ |= 0x00000002; - firstTxnId_ = value; - onChanged(); - return this; - } - public Builder clearFirstTxnId() { - bitField0_ = (bitField0_ & ~0x00000002); - firstTxnId_ = 0L; - onChanged(); - return this; - } - - // required uint32 numTxns = 3; - private int numTxns_ ; - public boolean hasNumTxns() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public int getNumTxns() { - return numTxns_; - } - public Builder setNumTxns(int value) { - bitField0_ |= 0x00000004; - numTxns_ = value; - onChanged(); - return this; - } - public Builder clearNumTxns() { - bitField0_ = (bitField0_ & ~0x00000004); - numTxns_ = 0; - onChanged(); - return this; - } - - // required bytes records = 4; - private com.google.protobuf.ByteString records_ = com.google.protobuf.ByteString.EMPTY; - public boolean hasRecords() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public com.google.protobuf.ByteString getRecords() { - return records_; - } - public Builder setRecords(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000008; - records_ = value; - onChanged(); - return this; - } - public Builder clearRecords() { - bitField0_ = (bitField0_ & ~0x00000008); - records_ = getDefaultInstance().getRecords(); - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:JournalRequestProto) - } - - static { - defaultInstance = new JournalRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:JournalRequestProto) - } - - public interface JournalResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class JournalResponseProto extends - com.google.protobuf.GeneratedMessage - implements JournalResponseProtoOrBuilder { - // Use JournalResponseProto.newBuilder() to construct. 
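// A minimal construction sketch for the JournalRequestProto generated above;
// the literal values are hypothetical. It exercises the bitField0_ presence
// tracking (each has*() flips to true once its setter runs) and the
// required-field check that separates build() from buildPartial().

import com.google.protobuf.ByteString;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto;

final class JournalRequestSketch {
  static JournalRequestProto partialRequest() {
    JournalRequestProto.Builder builder = JournalRequestProto.newBuilder()
        .setFirstTxnId(42L)                               // required field 2
        .setNumTxns(1)                                    // required field 3
        .setRecords(ByteString.copyFromUtf8("edit-ops")); // required field 4
    // registration (required field 1) is still unset, so the builder reports
    // itself uninitialized, and build() would throw UninitializedMessageException.
    assert builder.hasFirstTxnId() && !builder.hasRegistration();
    assert !builder.isInitialized();
    // buildPartial() skips the check and returns the incomplete message as-is.
    return builder.buildPartial();
  }
}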
- private JournalResponseProto(Builder builder) { - super(builder); - } - private JournalResponseProto(boolean noInit) {} - - private static final JournalResponseProto defaultInstance; - public static JournalResponseProto getDefaultInstance() { - return defaultInstance; - } - - public JournalResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - }
- } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:JournalResponseProto) - } - - static { - defaultInstance = new JournalResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:JournalResponseProto) - } - - public interface StartLogSegmentRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .NamenodeRegistrationProto registration = 1; - boolean hasRegistration(); - 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder(); - - // required uint64 txid = 2; - boolean hasTxid(); - long getTxid(); - } - public static final class StartLogSegmentRequestProto extends - com.google.protobuf.GeneratedMessage - implements StartLogSegmentRequestProtoOrBuilder { - // Use StartLogSegmentRequestProto.newBuilder() to construct. - private StartLogSegmentRequestProto(Builder builder) { - super(builder); - } - private StartLogSegmentRequestProto(boolean noInit) {} - - private static final StartLogSegmentRequestProto defaultInstance; - public static StartLogSegmentRequestProto getDefaultInstance() { - return defaultInstance; - } - - public StartLogSegmentRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required .NamenodeRegistrationProto registration = 1; - public static final int REGISTRATION_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_; - public boolean hasRegistration() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { - return registration_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { - return registration_; - } - - // required uint64 txid = 2; - public static final int TXID_FIELD_NUMBER = 2; - private long txid_; - public boolean hasTxid() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getTxid() { - return txid_; - } - - private void initFields() { - registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - txid_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasRegistration()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasTxid()) { - memoizedIsInitialized = 0; - return false; - } - if (!getRegistration().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, registration_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, txid_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, registration_); - } - if (((bitField0_ & 0x00000002) == 
0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, txid_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto) obj; - - boolean result = true; - result = result && (hasRegistration() == other.hasRegistration()); - if (hasRegistration()) { - result = result && getRegistration() - .equals(other.getRegistration()); - } - result = result && (hasTxid() == other.hasTxid()); - if (hasTxid()) { - result = result && (getTxid() - == other.getTxid()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasRegistration()) { - hash = (37 * hash) + REGISTRATION_FIELD_NUMBER; - hash = (53 * hash) + getRegistration().hashCode(); - } - if (hasTxid()) { - hash = (37 * hash) + TXID_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getTxid()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getRegistrationFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (registrationBuilder_ == null) { - registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - } else { - registrationBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - txid_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor -
getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (registrationBuilder_ == null) { - result.registration_ = registration_; - } else { - result.registration_ = registrationBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.txid_ = txid_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance()) return this; - if (other.hasRegistration()) { - mergeRegistration(other.getRegistration()); - } - if (other.hasTxid()) { - setTxid(other.getTxid()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasRegistration()) { - - return false; - } - if (!hasTxid()) { - - return false; - } - if (!getRegistration().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - 
extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(); - if (hasRegistration()) { - subBuilder.mergeFrom(getRegistration()); - } - input.readMessage(subBuilder, extensionRegistry); - setRegistration(subBuilder.buildPartial()); - break; - } - case 16: { - bitField0_ |= 0x00000002; - txid_ = input.readUInt64(); - break; - } - } - } - } - - private int bitField0_; - - // required .NamenodeRegistrationProto registration = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> registrationBuilder_; - public boolean hasRegistration() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { - if (registrationBuilder_ == null) { - return registration_; - } else { - return registrationBuilder_.getMessage(); - } - } - public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { - if (registrationBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - registration_ = value; - onChanged(); - } else { - registrationBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setRegistration( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder builderForValue) { - if (registrationBuilder_ == null) { - registration_ = builderForValue.build(); - onChanged(); - } else { - registrationBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { - if (registrationBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - registration_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance()) { - registration_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial(); - } else { - registration_ = value; - } - onChanged(); - } else { - registrationBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearRegistration() { - if (registrationBuilder_ == null) { - registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - onChanged(); - } else { - registrationBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder getRegistrationBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getRegistrationFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { - if 
(registrationBuilder_ != null) { - return registrationBuilder_.getMessageOrBuilder(); - } else { - return registration_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> - getRegistrationFieldBuilder() { - if (registrationBuilder_ == null) { - registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder>( - registration_, - getParentForChildren(), - isClean()); - registration_ = null; - } - return registrationBuilder_; - } - - // required uint64 txid = 2; - private long txid_ ; - public boolean hasTxid() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getTxid() { - return txid_; - } - public Builder setTxid(long value) { - bitField0_ |= 0x00000002; - txid_ = value; - onChanged(); - return this; - } - public Builder clearTxid() { - bitField0_ = (bitField0_ & ~0x00000002); - txid_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:StartLogSegmentRequestProto) - } - - static { - defaultInstance = new StartLogSegmentRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:StartLogSegmentRequestProto) - } - - public interface StartLogSegmentResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class StartLogSegmentResponseProto extends - com.google.protobuf.GeneratedMessage - implements StartLogSegmentResponseProtoOrBuilder { - // Use StartLogSegmentResponseProto.newBuilder() to construct. 
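// Illustrative sketch, not generated by protoc: invoking the
// JournalProtocolService blocking stub (defined later in this file) to
// open a new log segment. Whether a null RpcController is acceptable
// depends on the BlockingRpcChannel implementation, so one is taken as a
// parameter here.
private static StartLogSegmentResponseProto exampleStartLogSegment(
    com.google.protobuf.BlockingRpcChannel channel,
    com.google.protobuf.RpcController controller,
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration,
    long txid) throws com.google.protobuf.ServiceException {
  JournalProtocolService.BlockingInterface stub =
      JournalProtocolService.newBlockingStub(channel);
  StartLogSegmentRequestProto request = StartLogSegmentRequestProto.newBuilder()
      .setRegistration(registration)   // required field 1
      .setTxid(txid)                   // required field 2: first txid of the new segment
      .build();                        // throws if any required field is unset
  return stub.startLogSegment(controller, request);
}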
- private StartLogSegmentResponseProto(Builder builder) { - super(builder); - } - private StartLogSegmentResponseProto(boolean noInit) {} - - private static final StartLogSegmentResponseProto defaultInstance; - public static StartLogSegmentResponseProto getDefaultInstance() { - return defaultInstance; - } - - public StartLogSegmentResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom( - byte[] 
data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder<Builder> - implements org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { -
super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:StartLogSegmentResponseProto) - } - - static { - defaultInstance = new StartLogSegmentResponseProto(true); - 
defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:StartLogSegmentResponseProto) - } - - public static abstract class JournalProtocolService - implements com.google.protobuf.Service { - protected JournalProtocolService() {} - - public interface Interface { - public abstract void journal( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto request, - com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto> done); - - public abstract void startLogSegment( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto request, - com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto> done); - - } - - public static com.google.protobuf.Service newReflectiveService( - final Interface impl) { - return new JournalProtocolService() { - @java.lang.Override - public void journal( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto request, - com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto> done) { - impl.journal(controller, request, done); - } - - @java.lang.Override - public void startLogSegment( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto request, - com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto> done) { - impl.startLogSegment(controller, request, done); - } - - }; - } - - public static com.google.protobuf.BlockingService - newReflectiveBlockingService(final BlockingInterface impl) { - return new com.google.protobuf.BlockingService() { - public final com.google.protobuf.Descriptors.ServiceDescriptor - getDescriptorForType() { - return getDescriptor(); - } - - public final com.google.protobuf.Message callBlockingMethod( - com.google.protobuf.Descriptors.MethodDescriptor method, - com.google.protobuf.RpcController controller, - com.google.protobuf.Message request) - throws com.google.protobuf.ServiceException { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.callBlockingMethod() given method descriptor for " + - "wrong service type."); - } - switch(method.getIndex()) { - case 0: - return impl.journal(controller, (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto)request); - case 1: - return impl.startLogSegment(controller, (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto)request); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public final com.google.protobuf.Message - getRequestPrototype( - com.google.protobuf.Descriptors.MethodDescriptor method) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.getRequestPrototype() given method " + - "descriptor for wrong service type."); - } - switch(method.getIndex()) { - case 0: - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.getDefaultInstance(); - case 1: - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance(); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public final com.google.protobuf.Message - getResponsePrototype( - com.google.protobuf.Descriptors.MethodDescriptor method) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.getResponsePrototype() given method " + - "descriptor for
wrong service type."); - } - switch(method.getIndex()) { - case 0: - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance(); - case 1: - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance(); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - }; - } - - public abstract void journal( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void startLogSegment( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto request, - com.google.protobuf.RpcCallback done); - - public static final - com.google.protobuf.Descriptors.ServiceDescriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.getDescriptor().getServices().get(0); - } - public final com.google.protobuf.Descriptors.ServiceDescriptor - getDescriptorForType() { - return getDescriptor(); - } - - public final void callMethod( - com.google.protobuf.Descriptors.MethodDescriptor method, - com.google.protobuf.RpcController controller, - com.google.protobuf.Message request, - com.google.protobuf.RpcCallback< - com.google.protobuf.Message> done) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.callMethod() given method descriptor for wrong " + - "service type."); - } - switch(method.getIndex()) { - case 0: - this.journal(controller, (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 1: - this.startLogSegment(controller, (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public final com.google.protobuf.Message - getRequestPrototype( - com.google.protobuf.Descriptors.MethodDescriptor method) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.getRequestPrototype() given method " + - "descriptor for wrong service type."); - } - switch(method.getIndex()) { - case 0: - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.getDefaultInstance(); - case 1: - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance(); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public final com.google.protobuf.Message - getResponsePrototype( - com.google.protobuf.Descriptors.MethodDescriptor method) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.getResponsePrototype() given method " + - "descriptor for wrong service type."); - } - switch(method.getIndex()) { - case 0: - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance(); - case 1: - return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance(); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public static Stub newStub( - com.google.protobuf.RpcChannel channel) { - 
return new Stub(channel); - } - - public static final class Stub extends org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalProtocolService implements Interface { - private Stub(com.google.protobuf.RpcChannel channel) { - this.channel = channel; - } - - private final com.google.protobuf.RpcChannel channel; - - public com.google.protobuf.RpcChannel getChannel() { - return channel; - } - - public void journal( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto request, - com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto> done) { - channel.callMethod( - getDescriptor().getMethods().get(0), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance())); - } - - public void startLogSegment( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto request, - com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto> done) { - channel.callMethod( - getDescriptor().getMethods().get(1), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance())); - } - } - - public static BlockingInterface newBlockingStub( - com.google.protobuf.BlockingRpcChannel channel) { - return new BlockingStub(channel); - } - - public interface BlockingInterface { - public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto journal( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto startLogSegment( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto request) - throws com.google.protobuf.ServiceException; - } - - private static final class BlockingStub implements BlockingInterface { - private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { - this.channel = channel; - } - - private final com.google.protobuf.BlockingRpcChannel channel; - - public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto journal( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(0), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto startLogSegment( - com.google.protobuf.RpcController controller, -
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(1), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance()); - } - - } - } - - private static com.google.protobuf.Descriptors.Descriptor - internal_static_JournalRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_JournalRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_JournalResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_JournalResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_StartLogSegmentRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_StartLogSegmentRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_StartLogSegmentResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_StartLogSegmentResponseProto_fieldAccessorTable; - - public static com.google.protobuf.Descriptors.FileDescriptor - getDescriptor() { - return descriptor; - } - private static com.google.protobuf.Descriptors.FileDescriptor - descriptor; - static { - java.lang.String[] descriptorData = { - "\n\025JournalProtocol.proto\032\nhdfs.proto\"}\n\023J" + - "ournalRequestProto\0220\n\014registration\030\001 \002(\013" + - "2\032.NamenodeRegistrationProto\022\022\n\nfirstTxn" + - "Id\030\002 \002(\004\022\017\n\007numTxns\030\003 \002(\r\022\017\n\007records\030\004 \002" + - "(\014\"\026\n\024JournalResponseProto\"]\n\033StartLogSe" + - "gmentRequestProto\0220\n\014registration\030\001 \002(\0132" + - "\032.NamenodeRegistrationProto\022\014\n\004txid\030\002 \002(" + - "\004\"\036\n\034StartLogSegmentResponseProto2\240\001\n\026Jo" + - "urnalProtocolService\0226\n\007journal\022\024.Journa" + - "lRequestProto\032\025.JournalResponseProto\022N\n\017", - "startLogSegment\022\034.StartLogSegmentRequest" + - "Proto\032\035.StartLogSegmentResponseProtoBD\n%" + - "org.apache.hadoop.hdfs.protocol.protoB\025J" + - "ournalProtocolProtos\210\001\001\240\001\001" - }; - com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = - new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { - public com.google.protobuf.ExtensionRegistry assignDescriptors( - com.google.protobuf.Descriptors.FileDescriptor root) { - descriptor = root; - internal_static_JournalRequestProto_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_JournalRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_JournalRequestProto_descriptor, - new java.lang.String[] { "Registration", "FirstTxnId", "NumTxns", "Records", }, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.Builder.class); - internal_static_JournalResponseProto_descriptor = - getDescriptor().getMessageTypes().get(1); - 
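// (Annotation, not protoc output: this InternalDescriptorAssigner runs once
// during class initialization, after internalBuildGeneratedFileFrom (below)
// has parsed the FileDescriptor from the embedded descriptorData string; it
// binds each message's descriptor and FieldAccessorTable so the generated
// accessors can reach fields reflectively.)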
internal_static_JournalResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_JournalResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.Builder.class); - internal_static_StartLogSegmentRequestProto_descriptor = - getDescriptor().getMessageTypes().get(2); - internal_static_StartLogSegmentRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_StartLogSegmentRequestProto_descriptor, - new java.lang.String[] { "Registration", "Txid", }, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.Builder.class); - internal_static_StartLogSegmentResponseProto_descriptor = - getDescriptor().getMessageTypes().get(3); - internal_static_StartLogSegmentResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_StartLogSegmentResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.Builder.class); - return null; - } - }; - com.google.protobuf.Descriptors.FileDescriptor - .internalBuildGeneratedFileFrom(descriptorData, - new com.google.protobuf.Descriptors.FileDescriptor[] { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(), - }, assigner); - } - - // @@protoc_insertion_point(outer_class_scope) -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/NamenodeProtocolProtos.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/NamenodeProtocolProtos.java deleted file mode 100644 index 239674b7024..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/NamenodeProtocolProtos.java +++ /dev/null @@ -1,9015 +0,0 @@ -// Generated by the protocol buffer compiler. DO NOT EDIT! -// source: NamenodeProtocol.proto - -package org.apache.hadoop.hdfs.protocol.proto; - -public final class NamenodeProtocolProtos { - private NamenodeProtocolProtos() {} - public static void registerAllExtensions( - com.google.protobuf.ExtensionRegistry registry) { - } - public interface GetBlocksRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .DatanodeIDProto datanode = 1; - boolean hasDatanode(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanode(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeOrBuilder(); - - // required uint64 size = 2; - boolean hasSize(); - long getSize(); - } - public static final class GetBlocksRequestProto extends - com.google.protobuf.GeneratedMessage - implements GetBlocksRequestProtoOrBuilder { - // Use GetBlocksRequestProto.newBuilder() to construct. 
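// Illustrative sketch, not part of the generated file: producing the wire
// form of a GetBlocksRequestProto. The datanode value is a placeholder
// default instance, so buildPartial() is used; build() would insist that
// every required field, including those inside the DatanodeIDProto, be set.
private static com.google.protobuf.ByteString exampleWireForm() {
  GetBlocksRequestProto request = GetBlocksRequestProto.newBuilder()
      .setDatanode(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance())
      .setSize(64L * 1024 * 1024)   // rough upper bound, in bytes, on returned block data
      .buildPartial();
  // parseFrom(...) above reverses the trip once the message is fully initialized.
  return request.toByteString();
}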
- private GetBlocksRequestProto(Builder builder) { - super(builder); - } - private GetBlocksRequestProto(boolean noInit) {} - - private static final GetBlocksRequestProto defaultInstance; - public static GetBlocksRequestProto getDefaultInstance() { - return defaultInstance; - } - - public GetBlocksRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlocksRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlocksRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required .DatanodeIDProto datanode = 1; - public static final int DATANODE_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto datanode_; - public boolean hasDatanode() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanode() { - return datanode_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeOrBuilder() { - return datanode_; - } - - // required uint64 size = 2; - public static final int SIZE_FIELD_NUMBER = 2; - private long size_; - public boolean hasSize() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getSize() { - return size_; - } - - private void initFields() { - datanode_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance(); - size_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasDatanode()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasSize()) { - memoizedIsInitialized = 0; - return false; - } - if (!getDatanode().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, datanode_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, size_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, datanode_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, size_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto)) { - return super.equals(obj); 
- } - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto) obj; - - boolean result = true; - result = result && (hasDatanode() == other.hasDatanode()); - if (hasDatanode()) { - result = result && getDatanode() - .equals(other.getDatanode()); - } - result = result && (hasSize() == other.hasSize()); - if (hasSize()) { - result = result && (getSize() - == other.getSize()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasDatanode()) { - hash = (37 * hash) + DATANODE_FIELD_NUMBER; - hash = (53 * hash) + getDatanode().hashCode(); - } - if (hasSize()) { - hash = (37 * hash) + SIZE_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getSize()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlocksRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlocksRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getDatanodeFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (datanodeBuilder_ == null) { - datanode_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance(); - } else { - datanodeBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - size_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto buildParsed() - 
throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (datanodeBuilder_ == null) { - result.datanode_ = datanode_; - } else { - result.datanode_ = datanodeBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.size_ = size_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto.getDefaultInstance()) return this; - if (other.hasDatanode()) { - mergeDatanode(other.getDatanode()); - } - if (other.hasSize()) { - setSize(other.getSize()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasDatanode()) { - - return false; - } - if (!hasSize()) { - - return false; - } - if (!getDatanode().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder(); - if (hasDatanode()) { - subBuilder.mergeFrom(getDatanode()); - } - input.readMessage(subBuilder, extensionRegistry); - setDatanode(subBuilder.buildPartial()); - break; - } - case 16: { - bitField0_ |= 0x00000002; - size_ = input.readUInt64(); - break; - } - } - } - } - - private int bitField0_; - - // required .DatanodeIDProto datanode = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto datanode_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> datanodeBuilder_; - public boolean hasDatanode() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanode() { - if (datanodeBuilder_ == null) { - return datanode_; - } else { - return datanodeBuilder_.getMessage(); - } - } - public Builder setDatanode(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { - if (datanodeBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - datanode_ = value; - onChanged(); - } else { - datanodeBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setDatanode( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) { - if (datanodeBuilder_ == null) { - datanode_ = builderForValue.build(); - onChanged(); - } else { - datanodeBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeDatanode(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { - if (datanodeBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - datanode_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()) { - datanode_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder(datanode_).mergeFrom(value).buildPartial(); - } else { - datanode_ = value; - } - onChanged(); - } else { - datanodeBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearDatanode() { - if (datanodeBuilder_ == null) { - datanode_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance(); - onChanged(); - } else { - datanodeBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getDatanodeBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getDatanodeFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeOrBuilder() { - if (datanodeBuilder_ != null) { - return datanodeBuilder_.getMessageOrBuilder(); - } else { - return datanode_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> - getDatanodeFieldBuilder() { - if (datanodeBuilder_ == null) { - datanodeBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>( - datanode_, - getParentForChildren(), - isClean()); - datanode_ = null; - } - return datanodeBuilder_; - } - - // required uint64 size = 2; - private long size_ ; - public boolean hasSize() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public long getSize() { - return size_; - } - public Builder setSize(long value) { - bitField0_ |= 0x00000002; - size_ = value; - onChanged(); - return this; - } - public Builder clearSize() { - bitField0_ = (bitField0_ & ~0x00000002); - size_ = 0L; - 
onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:GetBlocksRequestProto) - } - - static { - defaultInstance = new GetBlocksRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetBlocksRequestProto) - } - - public interface GetBlocksResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .BlocksWithLocationsProto blocks = 1; - boolean hasBlocks(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto getBlocks(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProtoOrBuilder getBlocksOrBuilder(); - } - public static final class GetBlocksResponseProto extends - com.google.protobuf.GeneratedMessage - implements GetBlocksResponseProtoOrBuilder { - // Use GetBlocksResponseProto.newBuilder() to construct. - private GetBlocksResponseProto(Builder builder) { - super(builder); - } - private GetBlocksResponseProto(boolean noInit) {} - - private static final GetBlocksResponseProto defaultInstance; - public static GetBlocksResponseProto getDefaultInstance() { - return defaultInstance; - } - - public GetBlocksResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlocksResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlocksResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required .BlocksWithLocationsProto blocks = 1; - public static final int BLOCKS_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto blocks_; - public boolean hasBlocks() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto getBlocks() { - return blocks_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProtoOrBuilder getBlocksOrBuilder() { - return blocks_; - } - - private void initFields() { - blocks_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBlocks()) { - memoizedIsInitialized = 0; - return false; - } - if (!getBlocks().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, blocks_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, blocks_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object 
writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto) obj; - - boolean result = true; - result = result && (hasBlocks() == other.hasBlocks()); - if (hasBlocks()) { - result = result && getBlocks() - .equals(other.getBlocks()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBlocks()) { - hash = (37 * hash) + BLOCKS_FIELD_NUMBER; - hash = (53 * hash) + getBlocks().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } 
else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlocksResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlocksResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBlocksFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (blocksBuilder_ == null) { - blocks_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.getDefaultInstance(); - } else { - blocksBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto 
buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (blocksBuilder_ == null) { - result.blocks_ = blocks_; - } else { - result.blocks_ = blocksBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.getDefaultInstance()) return this; - if (other.hasBlocks()) { - mergeBlocks(other.getBlocks()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBlocks()) { - - return false; - } - if (!getBlocks().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.newBuilder(); - if (hasBlocks()) { - subBuilder.mergeFrom(getBlocks()); - } - input.readMessage(subBuilder, extensionRegistry); - setBlocks(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .BlocksWithLocationsProto blocks = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto blocks_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProtoOrBuilder> blocksBuilder_; - public boolean hasBlocks() { - return ((bitField0_ & 0x00000001) 
== 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto getBlocks() { - if (blocksBuilder_ == null) { - return blocks_; - } else { - return blocksBuilder_.getMessage(); - } - } - public Builder setBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto value) { - if (blocksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - blocks_ = value; - onChanged(); - } else { - blocksBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setBlocks( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.Builder builderForValue) { - if (blocksBuilder_ == null) { - blocks_ = builderForValue.build(); - onChanged(); - } else { - blocksBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto value) { - if (blocksBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - blocks_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.getDefaultInstance()) { - blocks_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.newBuilder(blocks_).mergeFrom(value).buildPartial(); - } else { - blocks_ = value; - } - onChanged(); - } else { - blocksBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearBlocks() { - if (blocksBuilder_ == null) { - blocks_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.getDefaultInstance(); - onChanged(); - } else { - blocksBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.Builder getBlocksBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getBlocksFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProtoOrBuilder getBlocksOrBuilder() { - if (blocksBuilder_ != null) { - return blocksBuilder_.getMessageOrBuilder(); - } else { - return blocks_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProtoOrBuilder> - getBlocksFieldBuilder() { - if (blocksBuilder_ == null) { - blocksBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProtoOrBuilder>( - blocks_, - getParentForChildren(), - isClean()); - blocks_ = null; - } - return blocksBuilder_; - } - - // @@protoc_insertion_point(builder_scope:GetBlocksResponseProto) - } - - static { - defaultInstance = new GetBlocksResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetBlocksResponseProto) - } - - public interface GetBlockKeysRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class GetBlockKeysRequestProto extends - com.google.protobuf.GeneratedMessage - implements GetBlockKeysRequestProtoOrBuilder { - // Use GetBlockKeysRequestProto.newBuilder() to construct. 
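[Editorial sketch, not part of the patch.] Both GetBlocksRequestProto and GetBlocksResponseProto above declare their fields required, which is why the generated code threads every path through isInitialized(). Before the GetBlockKeysRequestProto body continues below, a minimal illustration of what that enforcement buys a caller, using only methods visible in the hunk (newBuilder, setSize, isInitialized, buildPartial, hasSize, getSize):

    // Sketch only: required-field enforcement in GetBlocksRequestProto.
    // build() refuses an incomplete message; buildPartial() permits one,
    // which the generated parseFrom()/buildParsed() path would later reject.
    import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto;

    public class RequiredFieldSketch {
      public static void main(String[] args) {
        GetBlocksRequestProto.Builder b =
            GetBlocksRequestProto.newBuilder().setSize(1024L);
        // The required `datanode` field is still unset, so the builder is
        // incomplete even though `size` has been provided.
        System.out.println(b.isInitialized());            // prints: false
        GetBlocksRequestProto partial = b.buildPartial();  // tolerated
        System.out.println(partial.hasSize() + " " + partial.getSize());
        // Calling b.build() here would instead throw
        // UninitializedMessageException, via the
        // newUninitializedMessageException(result) path shown above.
      }
    }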
- private GetBlockKeysRequestProto(Builder builder) { - super(builder); - } - private GetBlockKeysRequestProto(boolean noInit) {} - - private static final GetBlockKeysRequestProto defaultInstance; - public static GetBlockKeysRequestProto getDefaultInstance() { - return defaultInstance; - } - - public GetBlockKeysRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlockKeysRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlockKeysRequestProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto parseFrom( - byte[] data, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlockKeysRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlockKeysRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - 
} - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:GetBlockKeysRequestProto) - } - - static { - defaultInstance = new GetBlockKeysRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetBlockKeysRequestProto) - } - - 
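[Editorial sketch, not part of the patch.] GetBlockKeysRequestProto, closed above, carries no fields at all; it exists purely so the RPC has a request type. A round trip through the generated serializers, using the parseFrom(byte[]) and equals() shown in the hunk plus toByteArray() from protobuf's MessageLite base class:

    // Sketch only: an empty request serializes to an empty payload (unknown
    // fields aside) and still compares equal after parsing.
    import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto;

    public class EmptyRequestSketch {
      public static void main(String[] args) throws Exception {
        GetBlockKeysRequestProto req =
            GetBlockKeysRequestProto.newBuilder().build();
        byte[] wire = req.toByteArray();   // getSerializedSize() == 0 here
        GetBlockKeysRequestProto parsed =
            GetBlockKeysRequestProto.parseFrom(wire);
        System.out.println(wire.length + " bytes; equal=" + parsed.equals(req));
      }
    }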
public interface GetBlockKeysResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .ExportedBlockKeysProto keys = 1; - boolean hasKeys(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getKeys(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder(); - } - public static final class GetBlockKeysResponseProto extends - com.google.protobuf.GeneratedMessage - implements GetBlockKeysResponseProtoOrBuilder { - // Use GetBlockKeysResponseProto.newBuilder() to construct. - private GetBlockKeysResponseProto(Builder builder) { - super(builder); - } - private GetBlockKeysResponseProto(boolean noInit) {} - - private static final GetBlockKeysResponseProto defaultInstance; - public static GetBlockKeysResponseProto getDefaultInstance() { - return defaultInstance; - } - - public GetBlockKeysResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlockKeysResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlockKeysResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required .ExportedBlockKeysProto keys = 1; - public static final int KEYS_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto keys_; - public boolean hasKeys() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getKeys() { - return keys_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() { - return keys_; - } - - private void initFields() { - keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasKeys()) { - memoizedIsInitialized = 0; - return false; - } - if (!getKeys().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, keys_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, keys_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto) obj; - - boolean result = true; - result = result && (hasKeys() == other.hasKeys()); - if (hasKeys()) { - result = result && getKeys() - .equals(other.getKeys()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasKeys()) { - hash = (37 * hash) + KEYS_FIELD_NUMBER; - hash = (53 * hash) + getKeys().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws 
java.io.IOException {
    return newBuilder().mergeFrom(input).buildParsed();
  }

[... generated code elided: the remaining parseFrom overload and the Builder
 of GetBlockKeysResponseProto (standard protobuf 2.x plumbing:
 clear/clone/build/buildPartial/mergeFrom, plus a SingleFieldBuilder for its
 one field, required ExportedBlockKeysProto keys = 1); the field-less message
 GetTransactionIdRequestProto; the complete GetTransactionIdResponseProto,
 whose single field, required uint64 txId = 1, is tracked through a
 bitField0_ presence bit and enforced in isInitialized(); and the opening of
 the field-less RollEditLogRequestProto, whose body resumes after the sketch
 below ...]
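An editorial sketch, not part of the patch: how callers exercise the
generated GetTransactionIdResponseProto API elided above, assuming the
protobuf-java 2.x runtime this code was generated against. TxIdRoundTrip and
the value 42L are illustrative.

// Round-trip a GetTransactionIdResponseProto through its generated API.
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto;

public class TxIdRoundTrip {
  public static void main(String[] args) throws Exception {
    // build() runs the generated isInitialized() check: leaving the
    // required txId unset would throw UninitializedMessageException.
    GetTransactionIdResponseProto resp =
        GetTransactionIdResponseProto.newBuilder().setTxId(42L).build();

    byte[] wire = resp.toByteArray();  // serializes via the generated writeTo(...)
    GetTransactionIdResponseProto parsed =
        GetTransactionIdResponseProto.parseFrom(wire);  // the buildParsed() path

    assert parsed.hasTxId() && parsed.getTxId() == 42L;
  }
}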
[... generated code elided: the body of RollEditLogRequestProto (like
 GetTransactionIdRequestProto it declares no fields, so writeTo,
 getSerializedSize, equals and hashCode involve only the unknown-field set),
 its parseFrom/parseDelimitedFrom overloads and Builder, and the opening of
 the RollEditLogResponseProtoOrBuilder interface, continued after the sketch
 below ...]
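An editorial sketch, not part of the patch: field-less requests such as
RollEditLogRequestProto serialize to zero bytes, and the shared default
instance is the idiomatic payload for such RPCs. EmptyRequestDemo is an
illustrative class name; assumes protobuf-java 2.x.

import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto;

public class EmptyRequestDemo {
  public static void main(String[] args) {
    RollEditLogRequestProto req = RollEditLogRequestProto.getDefaultInstance();
    // The generated getSerializedSize() sums field sizes plus unknown
    // fields; with neither present it is zero.
    assert req.getSerializedSize() == 0;
    // The generated equals() compares only unknown fields here, so a
    // freshly built instance equals the default one.
    assert req.equals(RollEditLogRequestProto.newBuilder().build());
  }
}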
[... generated code elided: the complete RollEditLogResponseProto, with its
 OrBuilder interface and one field, required CheckpointSignatureProto
 signature = 1, a message-typed field handled through a SingleFieldBuilder
 whose mergeSignature() merges into, rather than overwrites, an already-set
 value; followed by the ErrorReportRequestProtoOrBuilder interface, which
 declares required NamenodeRegistrationProto registration = 1, required
 uint32 errorCode = 2, and required string msg = 3, continued after the
 sketch below ...]
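An editorial sketch, not part of the patch: every message above also gets a
parseDelimitedFrom pair that reads a varint length prefix and returns null on
clean end-of-stream (the branch where mergeDelimitedFrom returns false).
writeDelimitedTo is the matching protobuf-java MessageLite call.
GetTransactionIdResponseProto is used because it is self-contained;
DelimitedStreamDemo is an illustrative name.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto;

public class DelimitedStreamDemo {
  public static void main(String[] args) throws Exception {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    for (long txId = 1; txId <= 3; txId++) {
      GetTransactionIdResponseProto.newBuilder().setTxId(txId).build()
          .writeDelimitedTo(out);  // varint length prefix, then the body
    }
    ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
    GetTransactionIdResponseProto msg;
    while ((msg = GetTransactionIdResponseProto.parseDelimitedFrom(in)) != null) {
      System.out.println("txId = " + msg.getTxId());  // prints 1, 2, 3
    }
  }
}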
[... generated code elided: the ErrorReportRequestProto message itself:
 accessors for its three required fields, with getMsg() lazily converting the
 stored ByteString to a String and caching it only when it is valid UTF-8;
 an isInitialized() that requires registration, errorCode and msg to be set
 and the registration sub-message to itself be initialized; the usual
 writeTo/getSerializedSize/equals/hashCode and parseFrom/parseDelimitedFrom
 overloads; and the opening of its Builder (the generated file continues) ...]
com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_ErrorReportRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_ErrorReportRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getRegistrationFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (registrationBuilder_ == null) { - registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - } else { - registrationBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - errorCode_ = 0; - bitField0_ = (bitField0_ & ~0x00000002); - msg_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (registrationBuilder_ == null) { - result.registration_ = registration_; - } else { - result.registration_ = registrationBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.errorCode_ = errorCode_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.msg_ = msg_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - 
} - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance()) return this; - if (other.hasRegistration()) { - mergeRegistration(other.getRegistration()); - } - if (other.hasErrorCode()) { - setErrorCode(other.getErrorCode()); - } - if (other.hasMsg()) { - setMsg(other.getMsg()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasRegistration()) { - - return false; - } - if (!hasErrorCode()) { - - return false; - } - if (!hasMsg()) { - - return false; - } - if (!getRegistration().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(); - if (hasRegistration()) { - subBuilder.mergeFrom(getRegistration()); - } - input.readMessage(subBuilder, extensionRegistry); - setRegistration(subBuilder.buildPartial()); - break; - } - case 16: { - bitField0_ |= 0x00000002; - errorCode_ = input.readUInt32(); - break; - } - case 26: { - bitField0_ |= 0x00000004; - msg_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required .NamenodeRegistrationProto registration = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> registrationBuilder_; - public boolean hasRegistration() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { - if (registrationBuilder_ == null) { - return registration_; - } else { - return registrationBuilder_.getMessage(); - } - } - public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { - if (registrationBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - registration_ = value; - 
onChanged(); - } else { - registrationBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setRegistration( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder builderForValue) { - if (registrationBuilder_ == null) { - registration_ = builderForValue.build(); - onChanged(); - } else { - registrationBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { - if (registrationBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - registration_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance()) { - registration_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial(); - } else { - registration_ = value; - } - onChanged(); - } else { - registrationBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearRegistration() { - if (registrationBuilder_ == null) { - registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - onChanged(); - } else { - registrationBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder getRegistrationBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getRegistrationFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { - if (registrationBuilder_ != null) { - return registrationBuilder_.getMessageOrBuilder(); - } else { - return registration_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> - getRegistrationFieldBuilder() { - if (registrationBuilder_ == null) { - registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder>( - registration_, - getParentForChildren(), - isClean()); - registration_ = null; - } - return registrationBuilder_; - } - - // required uint32 errorCode = 2; - private int errorCode_ ; - public boolean hasErrorCode() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public int getErrorCode() { - return errorCode_; - } - public Builder setErrorCode(int value) { - bitField0_ |= 0x00000002; - errorCode_ = value; - onChanged(); - return this; - } - public Builder clearErrorCode() { - bitField0_ = (bitField0_ & ~0x00000002); - errorCode_ = 0; - onChanged(); - return this; - } - - // required string msg = 3; - private java.lang.Object msg_ = ""; - public boolean hasMsg() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public String getMsg() { - java.lang.Object ref = msg_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - msg_ = s; - return s; - } else { - return (String) ref; - } - 
} - public Builder setMsg(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - msg_ = value; - onChanged(); - return this; - } - public Builder clearMsg() { - bitField0_ = (bitField0_ & ~0x00000004); - msg_ = getDefaultInstance().getMsg(); - onChanged(); - return this; - } - void setMsg(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000004; - msg_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:ErrorReportRequestProto) - } - - static { - defaultInstance = new ErrorReportRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ErrorReportRequestProto) - } - - public interface ErrorReportResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class ErrorReportResponseProto extends - com.google.protobuf.GeneratedMessage - implements ErrorReportResponseProtoOrBuilder { - // Use ErrorReportResponseProto.newBuilder() to construct. - private ErrorReportResponseProto(Builder builder) { - super(builder); - } - private ErrorReportResponseProto(boolean noInit) {} - - private static final ErrorReportResponseProto defaultInstance; - public static ErrorReportResponseProto getDefaultInstance() { - return defaultInstance; - } - - public ErrorReportResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_ErrorReportResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_ErrorReportResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder 
= new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_ErrorReportResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_ErrorReportResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - 
- public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:ErrorReportResponseProto) - } - - static { - defaultInstance = new ErrorReportResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ErrorReportResponseProto) - } - - public interface RegisterRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .NamenodeRegistrationProto registration = 1; - boolean hasRegistration(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder(); - } - public static final class RegisterRequestProto extends - com.google.protobuf.GeneratedMessage - implements RegisterRequestProtoOrBuilder { - // Use RegisterRequestProto.newBuilder() to construct. - private RegisterRequestProto(Builder builder) { - super(builder); - } - private RegisterRequestProto(boolean noInit) {} - - private static final RegisterRequestProto defaultInstance; - public static RegisterRequestProto getDefaultInstance() { - return defaultInstance; - } - - public RegisterRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_RegisterRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_RegisterRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required .NamenodeRegistrationProto registration = 1; - public static final int REGISTRATION_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_; - public boolean hasRegistration() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { - return registration_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { - return registration_; - } - - private void initFields() { - registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasRegistration()) { - memoizedIsInitialized = 0; - return false; - } - if (!getRegistration().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return 
true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, registration_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, registration_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto) obj; - - boolean result = true; - result = result && (hasRegistration() == other.hasRegistration()); - if (hasRegistration()) { - result = result && getRegistration() - .equals(other.getRegistration()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasRegistration()) { - hash = (37 * hash) + REGISTRATION_FIELD_NUMBER; - hash = (53 * hash) + getRegistration().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_RegisterRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_RegisterRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getRegistrationFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (registrationBuilder_ == null) { - registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - } else { - registrationBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public 
com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (registrationBuilder_ == null) { - result.registration_ = registration_; - } else { - result.registration_ = registrationBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.getDefaultInstance()) return this; - if (other.hasRegistration()) { - mergeRegistration(other.getRegistration()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasRegistration()) { - - return false; - } - if (!getRegistration().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder subBuilder = 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(); - if (hasRegistration()) { - subBuilder.mergeFrom(getRegistration()); - } - input.readMessage(subBuilder, extensionRegistry); - setRegistration(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .NamenodeRegistrationProto registration = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> registrationBuilder_; - public boolean hasRegistration() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { - if (registrationBuilder_ == null) { - return registration_; - } else { - return registrationBuilder_.getMessage(); - } - } - public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { - if (registrationBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - registration_ = value; - onChanged(); - } else { - registrationBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setRegistration( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder builderForValue) { - if (registrationBuilder_ == null) { - registration_ = builderForValue.build(); - onChanged(); - } else { - registrationBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { - if (registrationBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - registration_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance()) { - registration_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial(); - } else { - registration_ = value; - } - onChanged(); - } else { - registrationBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearRegistration() { - if (registrationBuilder_ == null) { - registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - onChanged(); - } else { - registrationBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder getRegistrationBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getRegistrationFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { - if (registrationBuilder_ != null) { - return registrationBuilder_.getMessageOrBuilder(); - } else { - return registration_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> - getRegistrationFieldBuilder() { - if (registrationBuilder_ == null) { - registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder>( - registration_, - getParentForChildren(), - isClean()); - registration_ = null; - } - return registrationBuilder_; - } - - // @@protoc_insertion_point(builder_scope:RegisterRequestProto) - } - - static { - defaultInstance = new RegisterRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:RegisterRequestProto) - } - - public interface RegisterResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .NamenodeRegistrationProto registration = 1; - boolean hasRegistration(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder(); - } - public static final class RegisterResponseProto extends - com.google.protobuf.GeneratedMessage - implements RegisterResponseProtoOrBuilder { - // Use RegisterResponseProto.newBuilder() to construct. - private RegisterResponseProto(Builder builder) { - super(builder); - } - private RegisterResponseProto(boolean noInit) {} - - private static final RegisterResponseProto defaultInstance; - public static RegisterResponseProto getDefaultInstance() { - return defaultInstance; - } - - public RegisterResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_RegisterResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_RegisterResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required .NamenodeRegistrationProto registration = 1; - public static final int REGISTRATION_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_; - public boolean hasRegistration() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { - return registration_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { - return registration_; - } - - private void initFields() { - registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasRegistration()) { - memoizedIsInitialized = 0; - return false; - } - if (!getRegistration().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void 
writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, registration_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, registration_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto) obj; - - boolean result = true; - result = result && (hasRegistration() == other.hasRegistration()); - if (hasRegistration()) { - result = result && getRegistration() - .equals(other.getRegistration()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasRegistration()) { - hash = (37 * hash) + REGISTRATION_FIELD_NUMBER; - hash = (53 * hash) + getRegistration().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_RegisterResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_RegisterResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getRegistrationFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (registrationBuilder_ == null) { - registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - } else { - registrationBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - 
getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (registrationBuilder_ == null) { - result.registration_ = registration_; - } else { - result.registration_ = registrationBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.getDefaultInstance()) return this; - if (other.hasRegistration()) { - mergeRegistration(other.getRegistration()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasRegistration()) { - - return false; - } - if (!getRegistration().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder subBuilder = 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(); - if (hasRegistration()) { - subBuilder.mergeFrom(getRegistration()); - } - input.readMessage(subBuilder, extensionRegistry); - setRegistration(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .NamenodeRegistrationProto registration = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> registrationBuilder_; - public boolean hasRegistration() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { - if (registrationBuilder_ == null) { - return registration_; - } else { - return registrationBuilder_.getMessage(); - } - } - public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { - if (registrationBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - registration_ = value; - onChanged(); - } else { - registrationBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setRegistration( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder builderForValue) { - if (registrationBuilder_ == null) { - registration_ = builderForValue.build(); - onChanged(); - } else { - registrationBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { - if (registrationBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - registration_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance()) { - registration_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial(); - } else { - registration_ = value; - } - onChanged(); - } else { - registrationBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearRegistration() { - if (registrationBuilder_ == null) { - registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - onChanged(); - } else { - registrationBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder getRegistrationBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getRegistrationFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { - if (registrationBuilder_ != null) { - return registrationBuilder_.getMessageOrBuilder(); - } else { - return registration_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> - getRegistrationFieldBuilder() { - if (registrationBuilder_ == null) { - registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder>( - registration_, - getParentForChildren(), - isClean()); - registration_ = null; - } - return registrationBuilder_; - } - - // @@protoc_insertion_point(builder_scope:RegisterResponseProto) - } - - static { - defaultInstance = new RegisterResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:RegisterResponseProto) - } - - public interface StartCheckpointRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .NamenodeRegistrationProto registration = 1; - boolean hasRegistration(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder(); - } - public static final class StartCheckpointRequestProto extends - com.google.protobuf.GeneratedMessage - implements StartCheckpointRequestProtoOrBuilder { - // Use StartCheckpointRequestProto.newBuilder() to construct. - private StartCheckpointRequestProto(Builder builder) { - super(builder); - } - private StartCheckpointRequestProto(boolean noInit) {} - - private static final StartCheckpointRequestProto defaultInstance; - public static StartCheckpointRequestProto getDefaultInstance() { - return defaultInstance; - } - - public StartCheckpointRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_StartCheckpointRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_StartCheckpointRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required .NamenodeRegistrationProto registration = 1; - public static final int REGISTRATION_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_; - public boolean hasRegistration() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { - return registration_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { - return registration_; - } - - private void initFields() { - registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasRegistration()) { - memoizedIsInitialized = 0; - return false; - } - if (!getRegistration().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - 
memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, registration_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, registration_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto) obj; - - boolean result = true; - result = result && (hasRegistration() == other.hasRegistration()); - if (hasRegistration()) { - result = result && getRegistration() - .equals(other.getRegistration()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasRegistration()) { - hash = (37 * hash) + REGISTRATION_FIELD_NUMBER; - hash = (53 * hash) + getRegistration().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_StartCheckpointRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_StartCheckpointRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getRegistrationFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (registrationBuilder_ == null) { - registration_ = 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - } else { - registrationBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (registrationBuilder_ == null) { - result.registration_ = registration_; - } else { - result.registration_ = registrationBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.getDefaultInstance()) return this; - if (other.hasRegistration()) { - mergeRegistration(other.getRegistration()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasRegistration()) { - - return false; - } - if (!getRegistration().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - 
this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(); - if (hasRegistration()) { - subBuilder.mergeFrom(getRegistration()); - } - input.readMessage(subBuilder, extensionRegistry); - setRegistration(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .NamenodeRegistrationProto registration = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> registrationBuilder_; - public boolean hasRegistration() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { - if (registrationBuilder_ == null) { - return registration_; - } else { - return registrationBuilder_.getMessage(); - } - } - public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { - if (registrationBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - registration_ = value; - onChanged(); - } else { - registrationBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setRegistration( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder builderForValue) { - if (registrationBuilder_ == null) { - registration_ = builderForValue.build(); - onChanged(); - } else { - registrationBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { - if (registrationBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - registration_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance()) { - registration_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial(); - } else { - registration_ = value; - } - onChanged(); - } else { - registrationBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearRegistration() { - if (registrationBuilder_ == null) { - registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - onChanged(); - } else { - registrationBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder getRegistrationBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getRegistrationFieldBuilder().getBuilder(); - } - public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { - if (registrationBuilder_ != null) { - return registrationBuilder_.getMessageOrBuilder(); - } else { - return registration_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> - getRegistrationFieldBuilder() { - if (registrationBuilder_ == null) { - registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder>( - registration_, - getParentForChildren(), - isClean()); - registration_ = null; - } - return registrationBuilder_; - } - - // @@protoc_insertion_point(builder_scope:StartCheckpointRequestProto) - } - - static { - defaultInstance = new StartCheckpointRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:StartCheckpointRequestProto) - } - - public interface StartCheckpointResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .NamenodeCommandProto command = 1; - boolean hasCommand(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto getCommand(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProtoOrBuilder getCommandOrBuilder(); - } - public static final class StartCheckpointResponseProto extends - com.google.protobuf.GeneratedMessage - implements StartCheckpointResponseProtoOrBuilder { - // Use StartCheckpointResponseProto.newBuilder() to construct. 
- private StartCheckpointResponseProto(Builder builder) { - super(builder); - } - private StartCheckpointResponseProto(boolean noInit) {} - - private static final StartCheckpointResponseProto defaultInstance; - public static StartCheckpointResponseProto getDefaultInstance() { - return defaultInstance; - } - - public StartCheckpointResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_StartCheckpointResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_StartCheckpointResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required .NamenodeCommandProto command = 1; - public static final int COMMAND_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto command_; - public boolean hasCommand() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto getCommand() { - return command_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProtoOrBuilder getCommandOrBuilder() { - return command_; - } - - private void initFields() { - command_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasCommand()) { - memoizedIsInitialized = 0; - return false; - } - if (!getCommand().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, command_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, command_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto) obj; - - boolean result = true; - result = result && (hasCommand() == other.hasCommand()); - if (hasCommand()) { - result = result && getCommand() - .equals(other.getCommand()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - 
} - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasCommand()) { - hash = (37 * hash) + COMMAND_FIELD_NUMBER; - hash = (53 * hash) + getCommand().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - 
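// Illustrative sketch (not part of this patch; assumes the protobuf-java 2.4-era
// API that the generated code above targets): a typical round-trip through the
// StartCheckpointResponseProto factory methods shown here. The names "roundTrip"
// and "cmd" are hypothetical, introduced only for this example.
static StartCheckpointResponseProto roundTrip(
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto cmd)
    throws com.google.protobuf.InvalidProtocolBufferException {
  StartCheckpointResponseProto resp = StartCheckpointResponseProto.newBuilder()
      .setCommand(cmd)                 // required field; build() verifies it is set
      .build();
  byte[] wire = resp.toByteArray();    // serialize to protobuf wire format
  return StartCheckpointResponseProto.parseFrom(wire);  // one of the parseFrom overloads above
}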
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_StartCheckpointResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_StartCheckpointResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getCommandFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (commandBuilder_ == null) { - command_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.getDefaultInstance(); - } else { - commandBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto(this); - int from_bitField0_ = 
bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (commandBuilder_ == null) { - result.command_ = command_; - } else { - result.command_ = commandBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.getDefaultInstance()) return this; - if (other.hasCommand()) { - mergeCommand(other.getCommand()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasCommand()) { - - return false; - } - if (!getCommand().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.newBuilder(); - if (hasCommand()) { - subBuilder.mergeFrom(getCommand()); - } - input.readMessage(subBuilder, extensionRegistry); - setCommand(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .NamenodeCommandProto command = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto command_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProtoOrBuilder> commandBuilder_; - public boolean hasCommand() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto getCommand() { - if (commandBuilder_ == null) { - return command_; - } else { - return commandBuilder_.getMessage(); - } - } - public Builder setCommand(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto value) { - if (commandBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - command_ = value; - onChanged(); - } else { - commandBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setCommand( - 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Builder builderForValue) { - if (commandBuilder_ == null) { - command_ = builderForValue.build(); - onChanged(); - } else { - commandBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeCommand(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto value) { - if (commandBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - command_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.getDefaultInstance()) { - command_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.newBuilder(command_).mergeFrom(value).buildPartial(); - } else { - command_ = value; - } - onChanged(); - } else { - commandBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearCommand() { - if (commandBuilder_ == null) { - command_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.getDefaultInstance(); - onChanged(); - } else { - commandBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Builder getCommandBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getCommandFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProtoOrBuilder getCommandOrBuilder() { - if (commandBuilder_ != null) { - return commandBuilder_.getMessageOrBuilder(); - } else { - return command_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProtoOrBuilder> - getCommandFieldBuilder() { - if (commandBuilder_ == null) { - commandBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProtoOrBuilder>( - command_, - getParentForChildren(), - isClean()); - command_ = null; - } - return commandBuilder_; - } - - // @@protoc_insertion_point(builder_scope:StartCheckpointResponseProto) - } - - static { - defaultInstance = new StartCheckpointResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:StartCheckpointResponseProto) - } - - public interface EndCheckpointRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .NamenodeRegistrationProto registration = 1; - boolean hasRegistration(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder(); - - // required .CheckpointSignatureProto signature = 2; - boolean hasSignature(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto getSignature(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder getSignatureOrBuilder(); - } - public static final class EndCheckpointRequestProto extends - com.google.protobuf.GeneratedMessage - implements EndCheckpointRequestProtoOrBuilder { - // Use EndCheckpointRequestProto.newBuilder() to construct. 
- private EndCheckpointRequestProto(Builder builder) { - super(builder); - } - private EndCheckpointRequestProto(boolean noInit) {} - - private static final EndCheckpointRequestProto defaultInstance; - public static EndCheckpointRequestProto getDefaultInstance() { - return defaultInstance; - } - - public EndCheckpointRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_EndCheckpointRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_EndCheckpointRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required .NamenodeRegistrationProto registration = 1; - public static final int REGISTRATION_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_; - public boolean hasRegistration() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { - return registration_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { - return registration_; - } - - // required .CheckpointSignatureProto signature = 2; - public static final int SIGNATURE_FIELD_NUMBER = 2; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto signature_; - public boolean hasSignature() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto getSignature() { - return signature_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder getSignatureOrBuilder() { - return signature_; - } - - private void initFields() { - registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasRegistration()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasSignature()) { - memoizedIsInitialized = 0; - return false; - } - if (!getRegistration().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - if (!getSignature().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, registration_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, signature_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, registration_); - } - if (((bitField0_ & 
0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, signature_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto) obj; - - boolean result = true; - result = result && (hasRegistration() == other.hasRegistration()); - if (hasRegistration()) { - result = result && getRegistration() - .equals(other.getRegistration()); - } - result = result && (hasSignature() == other.hasSignature()); - if (hasSignature()) { - result = result && getSignature() - .equals(other.getSignature()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasRegistration()) { - hash = (37 * hash) + REGISTRATION_FIELD_NUMBER; - hash = (53 * hash) + getRegistration().hashCode(); - } - if (hasSignature()) { - hash = (37 * hash) + SIGNATURE_FIELD_NUMBER; - hash = (53 * hash) + getSignature().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public 
static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_EndCheckpointRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_EndCheckpointRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getRegistrationFieldBuilder(); - getSignatureFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (registrationBuilder_ == null) { - registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - } else { - registrationBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - if (signatureBuilder_ == null) { - signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance(); - } else { - signatureBuilder_.clear(); - } - 
bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (registrationBuilder_ == null) { - result.registration_ = registration_; - } else { - result.registration_ = registrationBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - if (signatureBuilder_ == null) { - result.signature_ = signature_; - } else { - result.signature_ = signatureBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.getDefaultInstance()) return this; - if (other.hasRegistration()) { - mergeRegistration(other.getRegistration()); - } - if (other.hasSignature()) { - mergeSignature(other.getSignature()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasRegistration()) { - - return false; - } - if (!hasSignature()) { - - return false; - } - if (!getRegistration().isInitialized()) { - - return false; - } - if (!getSignature().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(); - if (hasRegistration()) { - subBuilder.mergeFrom(getRegistration()); - } - input.readMessage(subBuilder, extensionRegistry); - setRegistration(subBuilder.buildPartial()); - break; - } - case 18: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.newBuilder(); - if (hasSignature()) { - subBuilder.mergeFrom(getSignature()); - } - input.readMessage(subBuilder, extensionRegistry); - setSignature(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .NamenodeRegistrationProto registration = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> registrationBuilder_; - public boolean hasRegistration() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { - if (registrationBuilder_ == null) { - return registration_; - } else { - return registrationBuilder_.getMessage(); - } - } - public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { - if (registrationBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - registration_ = value; - onChanged(); - } else { - registrationBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setRegistration( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder builderForValue) { - if (registrationBuilder_ == null) { - registration_ = builderForValue.build(); - onChanged(); - } else { - registrationBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { - if (registrationBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - registration_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance()) { - registration_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial(); - } else { - registration_ = value; - } - onChanged(); - } else { - registrationBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - 
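// Illustrative sketch (not part of this patch): the mergeRegistration() method
// above composes rather than replaces — it merges the new value into the current
// one via NamenodeRegistrationProto.newBuilder(registration_).mergeFrom(value),
// so set fields of the update win and unset fields keep their prior values.
// "base", "update", and "applyUpdate" are hypothetical names for this example.
static EndCheckpointRequestProto.Builder applyUpdate(
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto base,
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto update) {
  return EndCheckpointRequestProto.newBuilder()
      .setRegistration(base)       // initial value of the required field
      .mergeRegistration(update);  // field-wise merge, not wholesale replacement
}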
public Builder clearRegistration() { - if (registrationBuilder_ == null) { - registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); - onChanged(); - } else { - registrationBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder getRegistrationBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getRegistrationFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { - if (registrationBuilder_ != null) { - return registrationBuilder_.getMessageOrBuilder(); - } else { - return registration_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> - getRegistrationFieldBuilder() { - if (registrationBuilder_ == null) { - registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder>( - registration_, - getParentForChildren(), - isClean()); - registration_ = null; - } - return registrationBuilder_; - } - - // required .CheckpointSignatureProto signature = 2; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder> signatureBuilder_; - public boolean hasSignature() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto getSignature() { - if (signatureBuilder_ == null) { - return signature_; - } else { - return signatureBuilder_.getMessage(); - } - } - public Builder setSignature(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto value) { - if (signatureBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - signature_ = value; - onChanged(); - } else { - signatureBuilder_.setMessage(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder setSignature( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder builderForValue) { - if (signatureBuilder_ == null) { - signature_ = builderForValue.build(); - onChanged(); - } else { - signatureBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder mergeSignature(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto value) { - if (signatureBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && - signature_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance()) { - signature_ = - 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.newBuilder(signature_).mergeFrom(value).buildPartial(); - } else { - signature_ = value; - } - onChanged(); - } else { - signatureBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder clearSignature() { - if (signatureBuilder_ == null) { - signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance(); - onChanged(); - } else { - signatureBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder getSignatureBuilder() { - bitField0_ |= 0x00000002; - onChanged(); - return getSignatureFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder getSignatureOrBuilder() { - if (signatureBuilder_ != null) { - return signatureBuilder_.getMessageOrBuilder(); - } else { - return signature_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder> - getSignatureFieldBuilder() { - if (signatureBuilder_ == null) { - signatureBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder>( - signature_, - getParentForChildren(), - isClean()); - signature_ = null; - } - return signatureBuilder_; - } - - // @@protoc_insertion_point(builder_scope:EndCheckpointRequestProto) - } - - static { - defaultInstance = new EndCheckpointRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:EndCheckpointRequestProto) - } - - public interface EndCheckpointResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - public static final class EndCheckpointResponseProto extends - com.google.protobuf.GeneratedMessage - implements EndCheckpointResponseProtoOrBuilder { - // Use EndCheckpointResponseProto.newBuilder() to construct. 
- private EndCheckpointResponseProto(Builder builder) { - super(builder); - } - private EndCheckpointResponseProto(boolean noInit) {} - - private static final EndCheckpointResponseProto defaultInstance; - public static EndCheckpointResponseProto getDefaultInstance() { - return defaultInstance; - } - - public EndCheckpointResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_EndCheckpointResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_EndCheckpointResponseProto_fieldAccessorTable; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom( - byte[] data, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_EndCheckpointResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_EndCheckpointResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - 
maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - } - } - } - - - // @@protoc_insertion_point(builder_scope:EndCheckpointResponseProto) - } - - static { - defaultInstance = new EndCheckpointResponseProto(true); - defaultInstance.initFields(); - } - - // 
@@protoc_insertion_point(class_scope:EndCheckpointResponseProto) - } - - public interface GetEditLogManifestRequestProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required uint64 sinceTxId = 1; - boolean hasSinceTxId(); - long getSinceTxId(); - } - public static final class GetEditLogManifestRequestProto extends - com.google.protobuf.GeneratedMessage - implements GetEditLogManifestRequestProtoOrBuilder { - // Use GetEditLogManifestRequestProto.newBuilder() to construct. - private GetEditLogManifestRequestProto(Builder builder) { - super(builder); - } - private GetEditLogManifestRequestProto(boolean noInit) {} - - private static final GetEditLogManifestRequestProto defaultInstance; - public static GetEditLogManifestRequestProto getDefaultInstance() { - return defaultInstance; - } - - public GetEditLogManifestRequestProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetEditLogManifestRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetEditLogManifestRequestProto_fieldAccessorTable; - } - - private int bitField0_; - // required uint64 sinceTxId = 1; - public static final int SINCETXID_FIELD_NUMBER = 1; - private long sinceTxId_; - public boolean hasSinceTxId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getSinceTxId() { - return sinceTxId_; - } - - private void initFields() { - sinceTxId_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasSinceTxId()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt64(1, sinceTxId_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(1, sinceTxId_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto) obj; - - boolean result = true; - result = result && (hasSinceTxId() == other.hasSinceTxId()); - if (hasSinceTxId()) { - result = result && (getSinceTxId() - == other.getSinceTxId()); - } - result 
= result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasSinceTxId()) { - hash = (37 * hash) + SINCETXID_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getSinceTxId()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder 
newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetEditLogManifestRequestProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetEditLogManifestRequestProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - sinceTxId_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto build() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto buildPartial() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if 
(((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.sinceTxId_ = sinceTxId_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance()) return this; - if (other.hasSinceTxId()) { - setSinceTxId(other.getSinceTxId()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasSinceTxId()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - sinceTxId_ = input.readUInt64(); - break; - } - } - } - } - - private int bitField0_; - - // required uint64 sinceTxId = 1; - private long sinceTxId_ ; - public boolean hasSinceTxId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getSinceTxId() { - return sinceTxId_; - } - public Builder setSinceTxId(long value) { - bitField0_ |= 0x00000001; - sinceTxId_ = value; - onChanged(); - return this; - } - public Builder clearSinceTxId() { - bitField0_ = (bitField0_ & ~0x00000001); - sinceTxId_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:GetEditLogManifestRequestProto) - } - - static { - defaultInstance = new GetEditLogManifestRequestProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetEditLogManifestRequestProto) - } - - public interface GetEditLogManifestResponseProtoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .RemoteEditLogManifestProto manifest = 1; - boolean hasManifest(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest(); - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder(); - } - public static final class GetEditLogManifestResponseProto extends - com.google.protobuf.GeneratedMessage - implements GetEditLogManifestResponseProtoOrBuilder { - // Use GetEditLogManifestResponseProto.newBuilder() to construct. 
- private GetEditLogManifestResponseProto(Builder builder) { - super(builder); - } - private GetEditLogManifestResponseProto(boolean noInit) {} - - private static final GetEditLogManifestResponseProto defaultInstance; - public static GetEditLogManifestResponseProto getDefaultInstance() { - return defaultInstance; - } - - public GetEditLogManifestResponseProto getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetEditLogManifestResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetEditLogManifestResponseProto_fieldAccessorTable; - } - - private int bitField0_; - // required .RemoteEditLogManifestProto manifest = 1; - public static final int MANIFEST_FIELD_NUMBER = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto manifest_; - public boolean hasManifest() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest() { - return manifest_; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder() { - return manifest_; - } - - private void initFields() { - manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasManifest()) { - memoizedIsInitialized = 0; - return false; - } - if (!getManifest().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, manifest_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, manifest_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto)) { - return super.equals(obj); - } - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto) obj; - - boolean result = true; - result = result && (hasManifest() == other.hasManifest()); - if (hasManifest()) { - result = result && getManifest() - .equals(other.getManifest()); - } - result = 
result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasManifest()) { - hash = (37 * hash) + MANIFEST_FIELD_NUMBER; - hash = (53 * hash) + getManifest().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static 
Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProtoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetEditLogManifestResponseProto_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetEditLogManifestResponseProto_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getManifestFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (manifestBuilder_ == null) { - manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance(); - } else { - manifestBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.getDescriptor(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto getDefaultInstanceForType() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance(); - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto build() { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto buildPartial() { - 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (manifestBuilder_ == null) { - result.manifest_ = manifest_; - } else { - result.manifest_ = manifestBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto) { - return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto other) { - if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance()) return this; - if (other.hasManifest()) { - mergeManifest(other.getManifest()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasManifest()) { - - return false; - } - if (!getManifest().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.newBuilder(); - if (hasManifest()) { - subBuilder.mergeFrom(getManifest()); - } - input.readMessage(subBuilder, extensionRegistry); - setManifest(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .RemoteEditLogManifestProto manifest = 1; - private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder> manifestBuilder_; - public boolean hasManifest() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest() { - if (manifestBuilder_ == null) { - return manifest_; - } else { - return manifestBuilder_.getMessage(); - } - } - public Builder setManifest(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto 
value) { - if (manifestBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - manifest_ = value; - onChanged(); - } else { - manifestBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setManifest( - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder builderForValue) { - if (manifestBuilder_ == null) { - manifest_ = builderForValue.build(); - onChanged(); - } else { - manifestBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeManifest(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto value) { - if (manifestBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - manifest_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance()) { - manifest_ = - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.newBuilder(manifest_).mergeFrom(value).buildPartial(); - } else { - manifest_ = value; - } - onChanged(); - } else { - manifestBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearManifest() { - if (manifestBuilder_ == null) { - manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance(); - onChanged(); - } else { - manifestBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder getManifestBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getManifestFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder() { - if (manifestBuilder_ != null) { - return manifestBuilder_.getMessageOrBuilder(); - } else { - return manifest_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder> - getManifestFieldBuilder() { - if (manifestBuilder_ == null) { - manifestBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder>( - manifest_, - getParentForChildren(), - isClean()); - manifest_ = null; - } - return manifestBuilder_; - } - - // @@protoc_insertion_point(builder_scope:GetEditLogManifestResponseProto) - } - - static { - defaultInstance = new GetEditLogManifestResponseProto(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:GetEditLogManifestResponseProto) - } - - public static abstract class NamenodeProtocolService - implements com.google.protobuf.Service { - protected NamenodeProtocolService() {} - - public interface Interface { - public abstract void getBlocks( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getBlockKeys( - com.google.protobuf.RpcController controller, - 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getTransationId( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void rollEditLog( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void versionRequest( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void errorReport( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void register( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void startCheckpoint( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void endCheckpoint( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getEditLogManifest( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto request, - com.google.protobuf.RpcCallback done); - - } - - public static com.google.protobuf.Service newReflectiveService( - final Interface impl) { - return new NamenodeProtocolService() { - @java.lang.Override - public void getBlocks( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.getBlocks(controller, request, done); - } - - @java.lang.Override - public void getBlockKeys( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.getBlockKeys(controller, request, done); - } - - @java.lang.Override - public void getTransationId( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.getTransationId(controller, request, done); - } - - @java.lang.Override - public void rollEditLog( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.rollEditLog(controller, request, done); - } - - @java.lang.Override - public void versionRequest( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.versionRequest(controller, request, done); - } - - @java.lang.Override - public void 
errorReport( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.errorReport(controller, request, done); - } - - @java.lang.Override - public void register( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.register(controller, request, done); - } - - @java.lang.Override - public void startCheckpoint( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.startCheckpoint(controller, request, done); - } - - @java.lang.Override - public void endCheckpoint( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.endCheckpoint(controller, request, done); - } - - @java.lang.Override - public void getEditLogManifest( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto request, - com.google.protobuf.RpcCallback done) { - impl.getEditLogManifest(controller, request, done); - } - - }; - } - - public static com.google.protobuf.BlockingService - newReflectiveBlockingService(final BlockingInterface impl) { - return new com.google.protobuf.BlockingService() { - public final com.google.protobuf.Descriptors.ServiceDescriptor - getDescriptorForType() { - return getDescriptor(); - } - - public final com.google.protobuf.Message callBlockingMethod( - com.google.protobuf.Descriptors.MethodDescriptor method, - com.google.protobuf.RpcController controller, - com.google.protobuf.Message request) - throws com.google.protobuf.ServiceException { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.callBlockingMethod() given method descriptor for " + - "wrong service type."); - } - switch(method.getIndex()) { - case 0: - return impl.getBlocks(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto)request); - case 1: - return impl.getBlockKeys(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto)request); - case 2: - return impl.getTransationId(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto)request); - case 3: - return impl.rollEditLog(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto)request); - case 4: - return impl.versionRequest(controller, (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto)request); - case 5: - return impl.errorReport(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto)request); - case 6: - return impl.register(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto)request); - case 7: - return impl.startCheckpoint(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto)request); - case 8: - return impl.endCheckpoint(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto)request); - case 9: - return 
impl.getEditLogManifest(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto)request); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public final com.google.protobuf.Message - getRequestPrototype( - com.google.protobuf.Descriptors.MethodDescriptor method) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.getRequestPrototype() given method " + - "descriptor for wrong service type."); - } - switch(method.getIndex()) { - case 0: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto.getDefaultInstance(); - case 1: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto.getDefaultInstance(); - case 2: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto.getDefaultInstance(); - case 3: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto.getDefaultInstance(); - case 4: - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto.getDefaultInstance(); - case 5: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance(); - case 6: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.getDefaultInstance(); - case 7: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.getDefaultInstance(); - case 8: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.getDefaultInstance(); - case 9: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance(); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public final com.google.protobuf.Message - getResponsePrototype( - com.google.protobuf.Descriptors.MethodDescriptor method) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.getResponsePrototype() given method " + - "descriptor for wrong service type."); - } - switch(method.getIndex()) { - case 0: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.getDefaultInstance(); - case 1: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.getDefaultInstance(); - case 2: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.getDefaultInstance(); - case 3: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.getDefaultInstance(); - case 4: - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDefaultInstance(); - case 5: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance(); - case 6: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.getDefaultInstance(); - case 7: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.getDefaultInstance(); - case 8: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.getDefaultInstance(); - case 9: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance(); - default: - throw new 
java.lang.AssertionError("Can't get here."); - } - } - - }; - } - - public abstract void getBlocks( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getBlockKeys( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getTransationId( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void rollEditLog( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void versionRequest( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void errorReport( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void register( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void startCheckpoint( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void endCheckpoint( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto request, - com.google.protobuf.RpcCallback done); - - public abstract void getEditLogManifest( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto request, - com.google.protobuf.RpcCallback done); - - public static final - com.google.protobuf.Descriptors.ServiceDescriptor - getDescriptor() { - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.getDescriptor().getServices().get(0); - } - public final com.google.protobuf.Descriptors.ServiceDescriptor - getDescriptorForType() { - return getDescriptor(); - } - - public final void callMethod( - com.google.protobuf.Descriptors.MethodDescriptor method, - com.google.protobuf.RpcController controller, - com.google.protobuf.Message request, - com.google.protobuf.RpcCallback< - com.google.protobuf.Message> done) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.callMethod() given method descriptor for wrong " + - "service type."); - } - switch(method.getIndex()) { - case 0: - this.getBlocks(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 1: - this.getBlockKeys(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 2: - 
this.getTransationId(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 3: - this.rollEditLog(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 4: - this.versionRequest(controller, (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 5: - this.errorReport(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 6: - this.register(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 7: - this.startCheckpoint(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 8: - this.endCheckpoint(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 9: - this.getEditLogManifest(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public final com.google.protobuf.Message - getRequestPrototype( - com.google.protobuf.Descriptors.MethodDescriptor method) { - if (method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.getRequestPrototype() given method " + - "descriptor for wrong service type."); - } - switch(method.getIndex()) { - case 0: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto.getDefaultInstance(); - case 1: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto.getDefaultInstance(); - case 2: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto.getDefaultInstance(); - case 3: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto.getDefaultInstance(); - case 4: - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto.getDefaultInstance(); - case 5: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance(); - case 6: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.getDefaultInstance(); - case 7: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.getDefaultInstance(); - case 8: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.getDefaultInstance(); - case 9: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance(); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public final com.google.protobuf.Message - getResponsePrototype( - com.google.protobuf.Descriptors.MethodDescriptor method) { - if 
(method.getService() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "Service.getResponsePrototype() given method " + - "descriptor for wrong service type."); - } - switch(method.getIndex()) { - case 0: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.getDefaultInstance(); - case 1: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.getDefaultInstance(); - case 2: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.getDefaultInstance(); - case 3: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.getDefaultInstance(); - case 4: - return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDefaultInstance(); - case 5: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance(); - case 6: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.getDefaultInstance(); - case 7: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.getDefaultInstance(); - case 8: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.getDefaultInstance(); - case 9: - return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance(); - default: - throw new java.lang.AssertionError("Can't get here."); - } - } - - public static Stub newStub( - com.google.protobuf.RpcChannel channel) { - return new Stub(channel); - } - - public static final class Stub extends org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService implements Interface { - private Stub(com.google.protobuf.RpcChannel channel) { - this.channel = channel; - } - - private final com.google.protobuf.RpcChannel channel; - - public com.google.protobuf.RpcChannel getChannel() { - return channel; - } - - public void getBlocks( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(0), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.getDefaultInstance())); - } - - public void getBlockKeys( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(1), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.getDefaultInstance())); - } - - public void getTransationId( - com.google.protobuf.RpcController controller, - 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(2), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.getDefaultInstance())); - } - - public void rollEditLog( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(3), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.getDefaultInstance())); - } - - public void versionRequest( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(4), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDefaultInstance())); - } - - public void errorReport( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(5), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance())); - } - - public void register( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(6), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.getDefaultInstance())); - } - - public void startCheckpoint( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(7), - 
controller, - request, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.getDefaultInstance())); - } - - public void endCheckpoint( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(8), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.getDefaultInstance())); - } - - public void getEditLogManifest( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(9), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance())); - } - } - - public static BlockingInterface newBlockingStub( - com.google.protobuf.BlockingRpcChannel channel) { - return new BlockingStub(channel); - } - - public interface BlockingInterface { - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto getBlocks( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto getBlockKeys( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto getTransationId( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto rollEditLog( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto versionRequest( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto request) - throws com.google.protobuf.ServiceException; - - public 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto errorReport( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto register( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto startCheckpoint( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto endCheckpoint( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto getEditLogManifest( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto request) - throws com.google.protobuf.ServiceException; - } - - private static final class BlockingStub implements BlockingInterface { - private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { - this.channel = channel; - } - - private final com.google.protobuf.BlockingRpcChannel channel; - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto getBlocks( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(0), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto getBlockKeys( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(1), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto getTransationId( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(2), - controller, - request, - 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto rollEditLog( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(3), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto versionRequest( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(4), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto errorReport( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(5), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto register( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(6), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto startCheckpoint( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(7), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto endCheckpoint( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto) channel.callBlockingMethod( - 
getDescriptor().getMethods().get(8), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.getDefaultInstance()); - } - - - public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto getEditLogManifest( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto) channel.callBlockingMethod( - getDescriptor().getMethods().get(9), - controller, - request, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance()); - } - - } - } - - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetBlocksRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetBlocksRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetBlocksResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetBlocksResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetBlockKeysRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetBlockKeysRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetBlockKeysResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetBlockKeysResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetTransactionIdRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetTransactionIdRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetTransactionIdResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetTransactionIdResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RollEditLogRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RollEditLogRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RollEditLogResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RollEditLogResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ErrorReportRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ErrorReportRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ErrorReportResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ErrorReportResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RegisterRequestProto_descriptor; - private static - 
com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RegisterRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RegisterResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RegisterResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_StartCheckpointRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_StartCheckpointRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_StartCheckpointResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_StartCheckpointResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_EndCheckpointRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_EndCheckpointRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_EndCheckpointResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_EndCheckpointResponseProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetEditLogManifestRequestProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetEditLogManifestRequestProto_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetEditLogManifestResponseProto_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetEditLogManifestResponseProto_fieldAccessorTable; - - public static com.google.protobuf.Descriptors.FileDescriptor - getDescriptor() { - return descriptor; - } - private static com.google.protobuf.Descriptors.FileDescriptor - descriptor; - static { - java.lang.String[] descriptorData = { - "\n\026NamenodeProtocol.proto\032\nhdfs.proto\"I\n\025" + - "GetBlocksRequestProto\022\"\n\010datanode\030\001 \002(\0132" + - "\020.DatanodeIDProto\022\014\n\004size\030\002 \002(\004\"C\n\026GetBl" + - "ocksResponseProto\022)\n\006blocks\030\001 \002(\0132\031.Bloc" + - "ksWithLocationsProto\"\032\n\030GetBlockKeysRequ" + - "estProto\"B\n\031GetBlockKeysResponseProto\022%\n" + - "\004keys\030\001 \002(\0132\027.ExportedBlockKeysProto\"\036\n\034" + - "GetTransactionIdRequestProto\"-\n\035GetTrans" + - "actionIdResponseProto\022\014\n\004txId\030\001 \002(\004\"\031\n\027R" + - "ollEditLogRequestProto\"H\n\030RollEditLogRes", - "ponseProto\022,\n\tsignature\030\001 \002(\0132\031.Checkpoi" + - "ntSignatureProto\"k\n\027ErrorReportRequestPr" + - "oto\0220\n\014registration\030\001 \002(\0132\032.NamenodeRegi" + - "strationProto\022\021\n\terrorCode\030\002 \002(\r\022\013\n\003msg\030" + - "\003 \002(\t\"\032\n\030ErrorReportResponseProto\"H\n\024Reg" + - "isterRequestProto\0220\n\014registration\030\001 \002(\0132" + - "\032.NamenodeRegistrationProto\"I\n\025RegisterR" + - "esponseProto\0220\n\014registration\030\001 \002(\0132\032.Nam" + - "enodeRegistrationProto\"O\n\033StartCheckpoin" + - "tRequestProto\0220\n\014registration\030\001 \002(\0132\032.Na", - "menodeRegistrationProto\"F\n\034StartCheckpoi" + - "ntResponseProto\022&\n\007command\030\001 
\002(\0132\025.Namen" + - "odeCommandProto\"{\n\031EndCheckpointRequestP" + - "roto\0220\n\014registration\030\001 \002(\0132\032.NamenodeReg" + - "istrationProto\022,\n\tsignature\030\002 \002(\0132\031.Chec" + - "kpointSignatureProto\"\034\n\032EndCheckpointRes" + - "ponseProto\"3\n\036GetEditLogManifestRequestP" + - "roto\022\021\n\tsinceTxId\030\001 \002(\004\"P\n\037GetEditLogMan" + - "ifestResponseProto\022-\n\010manifest\030\001 \002(\0132\033.R" + - "emoteEditLogManifestProto2\345\005\n\027NamenodePr", - "otocolService\022<\n\tgetBlocks\022\026.GetBlocksRe" + - "questProto\032\027.GetBlocksResponseProto\022E\n\014g" + - "etBlockKeys\022\031.GetBlockKeysRequestProto\032\032" + - ".GetBlockKeysResponseProto\022P\n\017getTransat" + - "ionId\022\035.GetTransactionIdRequestProto\032\036.G" + - "etTransactionIdResponseProto\022B\n\013rollEdit" + - "Log\022\030.RollEditLogRequestProto\032\031.RollEdit" + - "LogResponseProto\022=\n\016versionRequest\022\024.Ver" + - "sionRequestProto\032\025.VersionResponseProto\022" + - "B\n\013errorReport\022\030.ErrorReportRequestProto", - "\032\031.ErrorReportResponseProto\0229\n\010register\022" + - "\025.RegisterRequestProto\032\026.RegisterRespons" + - "eProto\022N\n\017startCheckpoint\022\034.StartCheckpo" + - "intRequestProto\032\035.StartCheckpointRespons" + - "eProto\022H\n\rendCheckpoint\022\032.EndCheckpointR" + - "equestProto\032\033.EndCheckpointResponseProto" + - "\022W\n\022getEditLogManifest\022\037.GetEditLogManif" + - "estRequestProto\032 .GetEditLogManifestResp" + - "onseProtoBE\n%org.apache.hadoop.hdfs.prot" + - "ocol.protoB\026NamenodeProtocolProtos\210\001\001\240\001\001" - }; - com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = - new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { - public com.google.protobuf.ExtensionRegistry assignDescriptors( - com.google.protobuf.Descriptors.FileDescriptor root) { - descriptor = root; - internal_static_GetBlocksRequestProto_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_GetBlocksRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetBlocksRequestProto_descriptor, - new java.lang.String[] { "Datanode", "Size", }, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto.Builder.class); - internal_static_GetBlocksResponseProto_descriptor = - getDescriptor().getMessageTypes().get(1); - internal_static_GetBlocksResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetBlocksResponseProto_descriptor, - new java.lang.String[] { "Blocks", }, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.Builder.class); - internal_static_GetBlockKeysRequestProto_descriptor = - getDescriptor().getMessageTypes().get(2); - internal_static_GetBlockKeysRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetBlockKeysRequestProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto.Builder.class); - 
internal_static_GetBlockKeysResponseProto_descriptor = - getDescriptor().getMessageTypes().get(3); - internal_static_GetBlockKeysResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetBlockKeysResponseProto_descriptor, - new java.lang.String[] { "Keys", }, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.Builder.class); - internal_static_GetTransactionIdRequestProto_descriptor = - getDescriptor().getMessageTypes().get(4); - internal_static_GetTransactionIdRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetTransactionIdRequestProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto.Builder.class); - internal_static_GetTransactionIdResponseProto_descriptor = - getDescriptor().getMessageTypes().get(5); - internal_static_GetTransactionIdResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetTransactionIdResponseProto_descriptor, - new java.lang.String[] { "TxId", }, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.Builder.class); - internal_static_RollEditLogRequestProto_descriptor = - getDescriptor().getMessageTypes().get(6); - internal_static_RollEditLogRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RollEditLogRequestProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto.Builder.class); - internal_static_RollEditLogResponseProto_descriptor = - getDescriptor().getMessageTypes().get(7); - internal_static_RollEditLogResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RollEditLogResponseProto_descriptor, - new java.lang.String[] { "Signature", }, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.Builder.class); - internal_static_ErrorReportRequestProto_descriptor = - getDescriptor().getMessageTypes().get(8); - internal_static_ErrorReportRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ErrorReportRequestProto_descriptor, - new java.lang.String[] { "Registration", "ErrorCode", "Msg", }, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.Builder.class); - internal_static_ErrorReportResponseProto_descriptor = - getDescriptor().getMessageTypes().get(9); - internal_static_ErrorReportResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ErrorReportResponseProto_descriptor, - new java.lang.String[] { }, - 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.Builder.class); - internal_static_RegisterRequestProto_descriptor = - getDescriptor().getMessageTypes().get(10); - internal_static_RegisterRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RegisterRequestProto_descriptor, - new java.lang.String[] { "Registration", }, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.Builder.class); - internal_static_RegisterResponseProto_descriptor = - getDescriptor().getMessageTypes().get(11); - internal_static_RegisterResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RegisterResponseProto_descriptor, - new java.lang.String[] { "Registration", }, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.Builder.class); - internal_static_StartCheckpointRequestProto_descriptor = - getDescriptor().getMessageTypes().get(12); - internal_static_StartCheckpointRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_StartCheckpointRequestProto_descriptor, - new java.lang.String[] { "Registration", }, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.Builder.class); - internal_static_StartCheckpointResponseProto_descriptor = - getDescriptor().getMessageTypes().get(13); - internal_static_StartCheckpointResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_StartCheckpointResponseProto_descriptor, - new java.lang.String[] { "Command", }, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.Builder.class); - internal_static_EndCheckpointRequestProto_descriptor = - getDescriptor().getMessageTypes().get(14); - internal_static_EndCheckpointRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_EndCheckpointRequestProto_descriptor, - new java.lang.String[] { "Registration", "Signature", }, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.Builder.class); - internal_static_EndCheckpointResponseProto_descriptor = - getDescriptor().getMessageTypes().get(15); - internal_static_EndCheckpointResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_EndCheckpointResponseProto_descriptor, - new java.lang.String[] { }, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.class, - org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.Builder.class); - internal_static_GetEditLogManifestRequestProto_descriptor = - getDescriptor().getMessageTypes().get(16); - internal_static_GetEditLogManifestRequestProto_fieldAccessorTable = new - 
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-                internal_static_GetEditLogManifestRequestProto_descriptor,
-                new java.lang.String[] { "SinceTxId", },
-                org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.class,
-                org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.Builder.class);
-          internal_static_GetEditLogManifestResponseProto_descriptor =
-            getDescriptor().getMessageTypes().get(17);
-          internal_static_GetEditLogManifestResponseProto_fieldAccessorTable = new
-              com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-                internal_static_GetEditLogManifestResponseProto_descriptor,
-                new java.lang.String[] { "Manifest", },
-                org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.class,
-                org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.Builder.class);
-          return null;
-        }
-      };
-    com.google.protobuf.Descriptors.FileDescriptor
-      .internalBuildGeneratedFileFrom(descriptorData,
-        new com.google.protobuf.Descriptors.FileDescriptor[] {
-          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
-        }, assigner);
-  }
-
-  // @@protoc_insertion_point(outer_class_scope)
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/proto/ClientDatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/proto/ClientDatanodeProtocol.proto
rename to hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/proto/ClientNamenodeProtocol.proto
rename to hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/proto/DatanodeProtocol.proto
rename to hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/proto/InterDatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/proto/InterDatanodeProtocol.proto
rename to hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/proto/JournalProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/proto/JournalProtocol.proto
rename to hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/proto/NamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/proto/NamenodeProtocol.proto
rename to hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/proto/datatransfer.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/proto/datatransfer.proto
rename to hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/proto/hdfs.proto
rename to hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
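
Editor's note (illustration only, not part of the patch): the classes deleted above are protobuf 2.x generated RPC plumbing for NamenodeProtocolService. A minimal sketch of how a caller drives the blocking stub, assuming some com.google.protobuf.BlockingRpcChannel implementation is available elsewhere -- the channel parameter below is an assumption, not something this patch provides:

    import com.google.protobuf.BlockingRpcChannel;
    import com.google.protobuf.ServiceException;
    import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto;
    import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService;

    public class NamenodeProtocolClientSketch {
      // Fetch the namenode's current transaction id through the generated stub.
      public static long fetchTransactionId(BlockingRpcChannel channel)
          throws ServiceException {
        // newBlockingStub wraps the channel; each call selects the method
        // descriptor by index, hands the request proto to
        // channel.callBlockingMethod, and casts the result back to the
        // concrete *ResponseProto type.
        NamenodeProtocolService.BlockingInterface proxy =
            NamenodeProtocolService.newBlockingStub(channel);
        GetTransactionIdRequestProto req =
            GetTransactionIdRequestProto.newBuilder().build();
        // The method keeps the .proto file's spelling, getTransationId (sic);
        // a null RpcController is passed for brevity.
        return proxy.getTransationId(null, req).getTxId();
      }
    }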
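
The non-blocking side works the same way through Interface and Stub, except each call takes an RpcCallback parameterized on the concrete response type; the generalizeCallback/specializeCallback calls in the deleted code bridge it to and from RpcCallback of Message. A sketch under the same assumption that an RpcChannel implementation exists:

    import com.google.protobuf.RpcCallback;
    import com.google.protobuf.RpcChannel;
    import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService;
    import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto;
    import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto;

    public class NamenodeProtocolAsyncSketch {
      public static void rollEditLogAsync(RpcChannel rpcChannel) {
        NamenodeProtocolService.Stub stub =
            NamenodeProtocolService.newStub(rpcChannel);
        // channel.callMethod is invoked with the method descriptor and the
        // response prototype; the callback fires once the response arrives.
        stub.rollEditLog(null, RollEditLogRequestProto.newBuilder().build(),
            new RpcCallback<RollEditLogResponseProto>() {
              @Override
              public void run(RollEditLogResponseProto response) {
                // The response carries the required CheckpointSignatureProto.
                System.out.println("rolled; signature set: "
                    + response.hasSignature());
              }
            });
      }
    }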
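
Finally, the long descriptorData string array deleted above is the serialized NamenodeProtocol.proto file descriptor; internalBuildGeneratedFileFrom parses it at class-load time, and the assigner wires a FieldAccessorTable to each of the eighteen message types. That metadata stays introspectable at runtime, for example:

    import com.google.protobuf.Descriptors;
    import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos;

    public class NamenodeProtocolDescriptorSketch {
      public static void main(String[] args) {
        // getServices().get(0) is NamenodeProtocolService, the same descriptor
        // the generated callMethod/callBlockingMethod switches dispatch on.
        Descriptors.ServiceDescriptor svc =
            NamenodeProtocolProtos.getDescriptor().getServices().get(0);
        for (Descriptors.MethodDescriptor m : svc.getMethods()) {
          System.out.println(m.getIndex() + ": " + m.getName() + "("
              + m.getInputType().getName() + ") -> "
              + m.getOutputType().getName());
        }
      }
    }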