diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
index 9141659b514..7813b4aa65c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
@@ -28,8 +28,11 @@ import java.util.TreeSet;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor;
+import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
+import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Strings;
@@ -52,7 +55,7 @@ public class ServerLoad {
   private int totalStaticBloomSizeKB = 0;
   private long totalCompactingKVs = 0;
   private long currentCompactedKVs = 0;
-
+
   public ServerLoad(ClusterStatusProtos.ServerLoad serverLoad) {
     this.serverLoad = serverLoad;
     for (ClusterStatusProtos.RegionLoad rl: serverLoad.getRegionLoadsList()) {
@@ -70,7 +73,7 @@ public class ServerLoad {
       totalCompactingKVs += rl.getTotalCompactingKVs();
       currentCompactedKVs += rl.getCurrentCompactedKVs();
     }
-
+
   }
 
   // NOTE: Function name cannot start with "get" because then an OpenDataException is thrown because
@@ -177,6 +180,26 @@ public class ServerLoad {
     return serverLoad.getInfoServerPort();
   }
+
+  /**
+   * Called directly from clients such as the hbase shell
+   * @return the list of ReplicationLoadSource
+   */
+  public List<ReplicationLoadSource> getReplicationLoadSourceList() {
+    return ProtobufUtil.toReplicationLoadSourceList(serverLoad.getReplLoadSourceList());
+  }
+
+  /**
+   * Called directly from clients such as the hbase shell
+   * @return ReplicationLoadSink
+   */
+  public ReplicationLoadSink getReplicationLoadSink() {
+    if (serverLoad.hasReplLoadSink()) {
+      return ProtobufUtil.toReplicationLoadSink(serverLoad.getReplLoadSink());
+    } else {
+      return null;
+    }
+  }
+
   /**
    * Originally, this method factored in the effect of requests going to the
    * server as well. However, this does not interact very well with the current
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index caae1bbbaec..43e91d23550 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -104,6 +104,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Col
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.DeleteType;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
 import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos;
 import org.apache.hadoop.hbase.protobuf.generated.FilterProtos;
@@ -130,6 +131,8 @@ import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor;
 import org.apache.hadoop.hbase.quotas.QuotaScope;
 import org.apache.hadoop.hbase.quotas.QuotaType;
 import org.apache.hadoop.hbase.quotas.ThrottleType;
+import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
+import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
 import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.access.TablePermission;
 import org.apache.hadoop.hbase.security.access.UserPermission;
@@ -2994,4 +2997,25 @@ public final class ProtobufUtil {
     return desc.build();
   }
+
+  public static ReplicationLoadSink toReplicationLoadSink(
+      ClusterStatusProtos.ReplicationLoadSink cls) {
+    return new ReplicationLoadSink(cls.getAgeOfLastAppliedOp(), cls.getTimeStampsOfLastAppliedOp());
+  }
+
+  public static ReplicationLoadSource toReplicationLoadSource(
+      ClusterStatusProtos.ReplicationLoadSource cls) {
+    return new ReplicationLoadSource(cls.getPeerID(), cls.getAgeOfLastShippedOp(),
+        cls.getSizeOfLogQueue(), cls.getTimeStampOfLastShippedOp(), cls.getReplicationLag());
+  }
+
+  public static List<ReplicationLoadSource> toReplicationLoadSourceList(
+      List<ClusterStatusProtos.ReplicationLoadSource> clsList) {
+    ArrayList<ReplicationLoadSource> rlsList = new ArrayList<ReplicationLoadSource>();
+    for (ClusterStatusProtos.ReplicationLoadSource cls : clsList) {
+      rlsList.add(toReplicationLoadSource(cls));
+    }
+    return rlsList;
+  }
+
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSink.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSink.java
new file mode 100644
index 00000000000..63fe3349585
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSink.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * A HBase ReplicationLoad to present MetricsSink information
+ */
+@InterfaceAudience.Private
+public class ReplicationLoadSink {
+  private long ageOfLastAppliedOp;
+  private long timeStampsOfLastAppliedOp;
+
+  public ReplicationLoadSink(long age, long timeStamp) {
+    this.ageOfLastAppliedOp = age;
+    this.timeStampsOfLastAppliedOp = timeStamp;
+  }
+
+  public long getAgeOfLastAppliedOp() {
+    return this.ageOfLastAppliedOp;
+  }
+
+  public long getTimeStampsOfLastAppliedOp() {
+    return this.timeStampsOfLastAppliedOp;
+  }
+
+}
\ No newline at end of file
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSource.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSource.java
new file mode 100644
index 00000000000..bfd15990be1
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSource.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * A HBase ReplicationLoad to present MetricsSource information
+ */
+@InterfaceAudience.Private
+public class ReplicationLoadSource {
+  private String peerID;
+  private long ageOfLastShippedOp;
+  private int sizeOfLogQueue;
+  private long timeStampOfLastShippedOp;
+  private long replicationLag;
+
+  public ReplicationLoadSource(String id, long age, int size, long timeStamp, long lag) {
+    this.peerID = id;
+    this.ageOfLastShippedOp = age;
+    this.sizeOfLogQueue = size;
+    this.timeStampOfLastShippedOp = timeStamp;
+    this.replicationLag = lag;
+  }
+
+  public String getPeerID() {
+    return this.peerID;
+  }
+
+  public long getAgeOfLastShippedOp() {
+    return this.ageOfLastShippedOp;
+  }
+
+  public long getSizeOfLogQueue() {
+    return this.sizeOfLogQueue;
+  }
+
+  public long getTimeStampOfLastShippedOp() {
+    return this.timeStampOfLastShippedOp;
+  }
+
+  public long getReplicationLag() {
+    return this.replicationLag;
+  }
+}
\ No newline at end of file
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java
index 805dfcaf537..698a59a2acb 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java
@@ -26,4 +26,5 @@ public interface MetricsReplicationSinkSource {
   void setLastAppliedOpAge(long age);
   void incrAppliedBatches(long batches);
   void incrAppliedOps(long batchsize);
+  long getLastAppliedOpAge();
 }
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java
index 66d265a90ba..fecf191a063 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java
@@ -43,4 +43,5 @@ public interface MetricsReplicationSourceSource {
   void incrLogReadInBytes(long size);
   void incrLogReadInEdits(long size);
   void clear();
+  long getLastShippedAge();
 }
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java
index a210171577c..6dace107f99 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java
@@ -95,4 +95,9 @@ public class MetricsReplicationGlobalSourceSource implements MetricsReplicationS
   @Override public void clear() {
   }
+
+  @Override
+  public long getLastShippedAge() {
+    return ageOfLastShippedOpGauge.value();
+  }
 }
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java index 3025e3e7724..14212ba0869 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java @@ -44,4 +44,9 @@ public class MetricsReplicationSinkSourceImpl implements MetricsReplicationSinkS @Override public void incrAppliedOps(long batchsize) { opsCounter.incr(batchsize); } + + @Override + public long getLastAppliedOpAge() { + return ageGauge.value(); + } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java index 89ef4de920a..1422e7e1cd3 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java @@ -125,4 +125,9 @@ public class MetricsReplicationSourceSourceImpl implements MetricsReplicationSou rms.removeMetric(logEditsFilteredKey); } + + @Override + public long getLastShippedAge() { + return ageOfLastShippedOpGauge.value(); + } } diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java index 6dc48fa12db..0d69d7a1adb 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java @@ -4438,6 +4438,1455 @@ public final class ClusterStatusProtos { // @@protoc_insertion_point(class_scope:RegionLoad) } + public interface ReplicationLoadSinkOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint64 ageOfLastAppliedOp = 1; + /** + * required uint64 ageOfLastAppliedOp = 1; + */ + boolean hasAgeOfLastAppliedOp(); + /** + * required uint64 ageOfLastAppliedOp = 1; + */ + long getAgeOfLastAppliedOp(); + + // required uint64 timeStampsOfLastAppliedOp = 2; + /** + * required uint64 timeStampsOfLastAppliedOp = 2; + */ + boolean hasTimeStampsOfLastAppliedOp(); + /** + * required uint64 timeStampsOfLastAppliedOp = 2; + */ + long getTimeStampsOfLastAppliedOp(); + } + /** + * Protobuf type {@code ReplicationLoadSink} + */ + public static final class ReplicationLoadSink extends + com.google.protobuf.GeneratedMessage + implements ReplicationLoadSinkOrBuilder { + // Use ReplicationLoadSink.newBuilder() to construct. 
+ private ReplicationLoadSink(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ReplicationLoadSink(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ReplicationLoadSink defaultInstance; + public static ReplicationLoadSink getDefaultInstance() { + return defaultInstance; + } + + public ReplicationLoadSink getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ReplicationLoadSink( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + ageOfLastAppliedOp_ = input.readUInt64(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + timeStampsOfLastAppliedOp_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ReplicationLoadSink parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ReplicationLoadSink(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required uint64 ageOfLastAppliedOp = 1; + public static final int AGEOFLASTAPPLIEDOP_FIELD_NUMBER = 1; + private long ageOfLastAppliedOp_; + /** + * required uint64 ageOfLastAppliedOp = 1; + */ + public boolean hasAgeOfLastAppliedOp() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint64 ageOfLastAppliedOp = 1; + */ + public long getAgeOfLastAppliedOp() { + return ageOfLastAppliedOp_; + } + + // required uint64 
timeStampsOfLastAppliedOp = 2; + public static final int TIMESTAMPSOFLASTAPPLIEDOP_FIELD_NUMBER = 2; + private long timeStampsOfLastAppliedOp_; + /** + * required uint64 timeStampsOfLastAppliedOp = 2; + */ + public boolean hasTimeStampsOfLastAppliedOp() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint64 timeStampsOfLastAppliedOp = 2; + */ + public long getTimeStampsOfLastAppliedOp() { + return timeStampsOfLastAppliedOp_; + } + + private void initFields() { + ageOfLastAppliedOp_ = 0L; + timeStampsOfLastAppliedOp_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasAgeOfLastAppliedOp()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTimeStampsOfLastAppliedOp()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, ageOfLastAppliedOp_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, timeStampsOfLastAppliedOp_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, ageOfLastAppliedOp_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, timeStampsOfLastAppliedOp_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink) obj; + + boolean result = true; + result = result && (hasAgeOfLastAppliedOp() == other.hasAgeOfLastAppliedOp()); + if (hasAgeOfLastAppliedOp()) { + result = result && (getAgeOfLastAppliedOp() + == other.getAgeOfLastAppliedOp()); + } + result = result && (hasTimeStampsOfLastAppliedOp() == other.hasTimeStampsOfLastAppliedOp()); + if (hasTimeStampsOfLastAppliedOp()) { + result = result && (getTimeStampsOfLastAppliedOp() + == other.getTimeStampsOfLastAppliedOp()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasAgeOfLastAppliedOp()) { + hash = (37 * hash) + AGEOFLASTAPPLIEDOP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getAgeOfLastAppliedOp()); + } + if (hasTimeStampsOfLastAppliedOp()) { + hash = (37 * hash) + 
TIMESTAMPSOFLASTAPPLIEDOP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTimeStampsOfLastAppliedOp()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ReplicationLoadSink} + */ + public static final class Builder extends + 
com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + ageOfLastAppliedOp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + timeStampsOfLastAppliedOp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink build() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.ageOfLastAppliedOp_ = ageOfLastAppliedOp_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.timeStampsOfLastAppliedOp_ = timeStampsOfLastAppliedOp_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance()) return this; + if (other.hasAgeOfLastAppliedOp()) { + setAgeOfLastAppliedOp(other.getAgeOfLastAppliedOp()); + } + if (other.hasTimeStampsOfLastAppliedOp()) { + setTimeStampsOfLastAppliedOp(other.getTimeStampsOfLastAppliedOp()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasAgeOfLastAppliedOp()) { + + return false; + } + if (!hasTimeStampsOfLastAppliedOp()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required uint64 ageOfLastAppliedOp = 1; + private long ageOfLastAppliedOp_ ; + /** + * required uint64 ageOfLastAppliedOp = 1; + */ + public boolean hasAgeOfLastAppliedOp() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint64 ageOfLastAppliedOp = 1; + */ + public long getAgeOfLastAppliedOp() { + return ageOfLastAppliedOp_; + } + /** + * required uint64 ageOfLastAppliedOp = 1; + */ + public Builder setAgeOfLastAppliedOp(long value) { + bitField0_ |= 0x00000001; + ageOfLastAppliedOp_ = value; + onChanged(); + return this; + } + /** + * required uint64 ageOfLastAppliedOp = 1; + */ + public Builder clearAgeOfLastAppliedOp() { + bitField0_ = (bitField0_ & ~0x00000001); + ageOfLastAppliedOp_ = 0L; + onChanged(); + return this; + } + + // required uint64 timeStampsOfLastAppliedOp = 2; + private long timeStampsOfLastAppliedOp_ ; + /** + * required uint64 timeStampsOfLastAppliedOp = 2; + */ + public boolean hasTimeStampsOfLastAppliedOp() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint64 timeStampsOfLastAppliedOp = 2; + */ + public long getTimeStampsOfLastAppliedOp() { + return timeStampsOfLastAppliedOp_; + } + /** + * required uint64 timeStampsOfLastAppliedOp = 2; + */ + public Builder setTimeStampsOfLastAppliedOp(long value) { + bitField0_ |= 0x00000002; + timeStampsOfLastAppliedOp_ = value; + onChanged(); + return this; + } + /** + * required uint64 timeStampsOfLastAppliedOp = 2; + */ + public Builder clearTimeStampsOfLastAppliedOp() { + bitField0_ = (bitField0_ & ~0x00000002); + timeStampsOfLastAppliedOp_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:ReplicationLoadSink) + } + + static { + defaultInstance = new ReplicationLoadSink(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ReplicationLoadSink) + } + + public interface ReplicationLoadSourceOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string peerID = 1; + /** + * required string peerID = 1; + */ + boolean hasPeerID(); + /** + * required string peerID = 1; + */ + java.lang.String getPeerID(); + /** 
+ * required string peerID = 1; + */ + com.google.protobuf.ByteString + getPeerIDBytes(); + + // required uint64 ageOfLastShippedOp = 2; + /** + * required uint64 ageOfLastShippedOp = 2; + */ + boolean hasAgeOfLastShippedOp(); + /** + * required uint64 ageOfLastShippedOp = 2; + */ + long getAgeOfLastShippedOp(); + + // required uint32 sizeOfLogQueue = 3; + /** + * required uint32 sizeOfLogQueue = 3; + */ + boolean hasSizeOfLogQueue(); + /** + * required uint32 sizeOfLogQueue = 3; + */ + int getSizeOfLogQueue(); + + // required uint64 timeStampOfLastShippedOp = 4; + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + boolean hasTimeStampOfLastShippedOp(); + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + long getTimeStampOfLastShippedOp(); + + // required uint64 replicationLag = 5; + /** + * required uint64 replicationLag = 5; + */ + boolean hasReplicationLag(); + /** + * required uint64 replicationLag = 5; + */ + long getReplicationLag(); + } + /** + * Protobuf type {@code ReplicationLoadSource} + */ + public static final class ReplicationLoadSource extends + com.google.protobuf.GeneratedMessage + implements ReplicationLoadSourceOrBuilder { + // Use ReplicationLoadSource.newBuilder() to construct. + private ReplicationLoadSource(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ReplicationLoadSource(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ReplicationLoadSource defaultInstance; + public static ReplicationLoadSource getDefaultInstance() { + return defaultInstance; + } + + public ReplicationLoadSource getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ReplicationLoadSource( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + peerID_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + ageOfLastShippedOp_ = input.readUInt64(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + sizeOfLogQueue_ = input.readUInt32(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + timeStampOfLastShippedOp_ = input.readUInt64(); + break; + } + case 40: { + bitField0_ |= 0x00000010; + replicationLag_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ReplicationLoadSource parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ReplicationLoadSource(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string peerID = 1; + public static final int PEERID_FIELD_NUMBER = 1; + private java.lang.Object peerID_; + /** + * required string peerID = 1; + */ + public boolean hasPeerID() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string peerID = 1; + */ + public java.lang.String getPeerID() { + java.lang.Object ref = peerID_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + peerID_ = s; + } + return s; + } + } + /** + * required string peerID = 1; + */ + public com.google.protobuf.ByteString + getPeerIDBytes() { + java.lang.Object ref = peerID_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + peerID_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required uint64 ageOfLastShippedOp = 2; + public static final int AGEOFLASTSHIPPEDOP_FIELD_NUMBER = 2; + private long ageOfLastShippedOp_; + /** + * required uint64 ageOfLastShippedOp = 2; + */ + public boolean hasAgeOfLastShippedOp() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint64 ageOfLastShippedOp = 2; + */ + public long getAgeOfLastShippedOp() { + return ageOfLastShippedOp_; + } + + // required uint32 sizeOfLogQueue = 3; + public static final int SIZEOFLOGQUEUE_FIELD_NUMBER = 3; + private int sizeOfLogQueue_; + /** + * required uint32 sizeOfLogQueue = 3; + */ + public boolean hasSizeOfLogQueue() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint32 sizeOfLogQueue = 3; + */ + public int getSizeOfLogQueue() { + return sizeOfLogQueue_; + } + + // required uint64 timeStampOfLastShippedOp = 4; + public static final int TIMESTAMPOFLASTSHIPPEDOP_FIELD_NUMBER = 4; + private long timeStampOfLastShippedOp_; + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + public boolean hasTimeStampOfLastShippedOp() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + public long getTimeStampOfLastShippedOp() { + return timeStampOfLastShippedOp_; + } + + // required uint64 replicationLag = 5; + public static final int REPLICATIONLAG_FIELD_NUMBER = 5; + private long 
replicationLag_; + /** + * required uint64 replicationLag = 5; + */ + public boolean hasReplicationLag() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required uint64 replicationLag = 5; + */ + public long getReplicationLag() { + return replicationLag_; + } + + private void initFields() { + peerID_ = ""; + ageOfLastShippedOp_ = 0L; + sizeOfLogQueue_ = 0; + timeStampOfLastShippedOp_ = 0L; + replicationLag_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasPeerID()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasAgeOfLastShippedOp()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasSizeOfLogQueue()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTimeStampOfLastShippedOp()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasReplicationLag()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getPeerIDBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, ageOfLastShippedOp_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt32(3, sizeOfLogQueue_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeUInt64(4, timeStampOfLastShippedOp_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeUInt64(5, replicationLag_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getPeerIDBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, ageOfLastShippedOp_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(3, sizeOfLogQueue_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(4, timeStampOfLastShippedOp_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(5, replicationLag_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource) obj; + + boolean result = true; + result = result && (hasPeerID() == other.hasPeerID()); + if (hasPeerID()) { + result = result && getPeerID() + 
.equals(other.getPeerID()); + } + result = result && (hasAgeOfLastShippedOp() == other.hasAgeOfLastShippedOp()); + if (hasAgeOfLastShippedOp()) { + result = result && (getAgeOfLastShippedOp() + == other.getAgeOfLastShippedOp()); + } + result = result && (hasSizeOfLogQueue() == other.hasSizeOfLogQueue()); + if (hasSizeOfLogQueue()) { + result = result && (getSizeOfLogQueue() + == other.getSizeOfLogQueue()); + } + result = result && (hasTimeStampOfLastShippedOp() == other.hasTimeStampOfLastShippedOp()); + if (hasTimeStampOfLastShippedOp()) { + result = result && (getTimeStampOfLastShippedOp() + == other.getTimeStampOfLastShippedOp()); + } + result = result && (hasReplicationLag() == other.hasReplicationLag()); + if (hasReplicationLag()) { + result = result && (getReplicationLag() + == other.getReplicationLag()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasPeerID()) { + hash = (37 * hash) + PEERID_FIELD_NUMBER; + hash = (53 * hash) + getPeerID().hashCode(); + } + if (hasAgeOfLastShippedOp()) { + hash = (37 * hash) + AGEOFLASTSHIPPEDOP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getAgeOfLastShippedOp()); + } + if (hasSizeOfLogQueue()) { + hash = (37 * hash) + SIZEOFLOGQUEUE_FIELD_NUMBER; + hash = (53 * hash) + getSizeOfLogQueue(); + } + if (hasTimeStampOfLastShippedOp()) { + hash = (37 * hash) + TIMESTAMPOFLASTSHIPPEDOP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTimeStampOfLastShippedOp()); + } + if (hasReplicationLag()) { + hash = (37 * hash) + REPLICATIONLAG_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getReplicationLag()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, 
extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ReplicationLoadSource} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + peerID_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + ageOfLastShippedOp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + sizeOfLogQueue_ = 0; + bitField0_ = (bitField0_ & ~0x00000004); + timeStampOfLastShippedOp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + replicationLag_ = 0L; + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + + public Builder clone() { + return 
create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource build() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.peerID_ = peerID_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.ageOfLastShippedOp_ = ageOfLastShippedOp_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.sizeOfLogQueue_ = sizeOfLogQueue_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.timeStampOfLastShippedOp_ = timeStampOfLastShippedOp_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.replicationLag_ = replicationLag_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.getDefaultInstance()) return this; + if (other.hasPeerID()) { + bitField0_ |= 0x00000001; + peerID_ = other.peerID_; + onChanged(); + } + if (other.hasAgeOfLastShippedOp()) { + setAgeOfLastShippedOp(other.getAgeOfLastShippedOp()); + } + if (other.hasSizeOfLogQueue()) { + setSizeOfLogQueue(other.getSizeOfLogQueue()); + } + if (other.hasTimeStampOfLastShippedOp()) { + setTimeStampOfLastShippedOp(other.getTimeStampOfLastShippedOp()); + } + if (other.hasReplicationLag()) { + setReplicationLag(other.getReplicationLag()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasPeerID()) { + + return false; + } + if (!hasAgeOfLastShippedOp()) { + + return false; + } + if (!hasSizeOfLogQueue()) { + + return false; + } + if (!hasTimeStampOfLastShippedOp()) { + + return false; + } + if (!hasReplicationLag()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string peerID = 1; + private java.lang.Object peerID_ = ""; + /** + * required string peerID = 1; + */ + public boolean hasPeerID() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string peerID = 1; + */ + public java.lang.String getPeerID() { + java.lang.Object ref = peerID_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + peerID_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string peerID = 1; + */ + public com.google.protobuf.ByteString + getPeerIDBytes() { + java.lang.Object ref = peerID_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + peerID_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string peerID = 1; + */ + public Builder setPeerID( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + peerID_ = value; + onChanged(); + return this; + } + /** + * required string peerID = 1; + */ + public Builder clearPeerID() { + bitField0_ = (bitField0_ & ~0x00000001); + peerID_ = getDefaultInstance().getPeerID(); + onChanged(); + return this; + } + /** + * required string peerID = 1; + */ + public Builder setPeerIDBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + peerID_ = value; + onChanged(); + return this; + } + + // required uint64 ageOfLastShippedOp = 2; + private long ageOfLastShippedOp_ ; + /** + * required uint64 ageOfLastShippedOp = 2; + */ + public boolean hasAgeOfLastShippedOp() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint64 ageOfLastShippedOp = 2; + */ + public long getAgeOfLastShippedOp() { + return ageOfLastShippedOp_; + } + /** + * required uint64 ageOfLastShippedOp = 2; + */ + public Builder setAgeOfLastShippedOp(long value) { + bitField0_ |= 0x00000002; + ageOfLastShippedOp_ = value; + onChanged(); + return this; + } + /** + * required uint64 ageOfLastShippedOp = 2; + */ + public Builder clearAgeOfLastShippedOp() { + bitField0_ = (bitField0_ & ~0x00000002); + ageOfLastShippedOp_ = 0L; + onChanged(); + return this; + } + + // required uint32 sizeOfLogQueue = 3; + private int sizeOfLogQueue_ ; + /** + * required uint32 sizeOfLogQueue = 3; + */ + public boolean hasSizeOfLogQueue() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint32 sizeOfLogQueue = 3; + */ + public int getSizeOfLogQueue() { + return sizeOfLogQueue_; + } + /** + * required uint32 sizeOfLogQueue = 3; + */ + public Builder setSizeOfLogQueue(int value) { + bitField0_ |= 0x00000004; + sizeOfLogQueue_ = value; + onChanged(); + return this; + } + /** + * required uint32 
sizeOfLogQueue = 3; + */ + public Builder clearSizeOfLogQueue() { + bitField0_ = (bitField0_ & ~0x00000004); + sizeOfLogQueue_ = 0; + onChanged(); + return this; + } + + // required uint64 timeStampOfLastShippedOp = 4; + private long timeStampOfLastShippedOp_ ; + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + public boolean hasTimeStampOfLastShippedOp() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + public long getTimeStampOfLastShippedOp() { + return timeStampOfLastShippedOp_; + } + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + public Builder setTimeStampOfLastShippedOp(long value) { + bitField0_ |= 0x00000008; + timeStampOfLastShippedOp_ = value; + onChanged(); + return this; + } + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + public Builder clearTimeStampOfLastShippedOp() { + bitField0_ = (bitField0_ & ~0x00000008); + timeStampOfLastShippedOp_ = 0L; + onChanged(); + return this; + } + + // required uint64 replicationLag = 5; + private long replicationLag_ ; + /** + * required uint64 replicationLag = 5; + */ + public boolean hasReplicationLag() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required uint64 replicationLag = 5; + */ + public long getReplicationLag() { + return replicationLag_; + } + /** + * required uint64 replicationLag = 5; + */ + public Builder setReplicationLag(long value) { + bitField0_ |= 0x00000010; + replicationLag_ = value; + onChanged(); + return this; + } + /** + * required uint64 replicationLag = 5; + */ + public Builder clearReplicationLag() { + bitField0_ = (bitField0_ & ~0x00000010); + replicationLag_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:ReplicationLoadSource) + } + + static { + defaultInstance = new ReplicationLoadSource(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ReplicationLoadSource) + } + public interface ServerLoadOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -4685,6 +6134,85 @@ public final class ClusterStatusProtos { * */ int getInfoServerPort(); + + // repeated .ReplicationLoadSource replLoadSource = 10; + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+     **
+     * The replicationLoadSource for the replication Source status of this region server.
+     * 
+ */ + java.util.List + getReplLoadSourceList(); + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+     **
+     * The replicationLoadSource for the replication Source status of this region server.
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource getReplLoadSource(int index); + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+     **
+     * The replicationLoadSource for the replication Source status of this region server.
+     * 
+ */ + int getReplLoadSourceCount(); + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+     **
+     * The replicationLoadSource for the replication Source status of this region server.
+     * 
+ */ + java.util.List + getReplLoadSourceOrBuilderList(); + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+     **
+     * The replicationLoadSource for the replication Source status of this region server.
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder getReplLoadSourceOrBuilder( + int index); + + // optional .ReplicationLoadSink replLoadSink = 11; + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
+     **
+     * The replicationLoadSink for the replication Sink status of this region server.
+     * 
+ */ + boolean hasReplLoadSink(); + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
+     **
+     * The replicationLoadSink for the replication Sink status of this region server.
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink getReplLoadSink(); + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
+     **
+     * The replicationLoadSink for the replication Sink status of this region server.
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder getReplLoadSinkOrBuilder(); } /** * Protobuf type {@code ServerLoad} @@ -4788,6 +6316,27 @@ public final class ClusterStatusProtos { infoServerPort_ = input.readUInt32(); break; } + case 82: { + if (!((mutable_bitField0_ & 0x00000200) == 0x00000200)) { + replLoadSource_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000200; + } + replLoadSource_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.PARSER, extensionRegistry)); + break; + } + case 90: { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder subBuilder = null; + if (((bitField0_ & 0x00000080) == 0x00000080)) { + subBuilder = replLoadSink_.toBuilder(); + } + replLoadSink_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(replLoadSink_); + replLoadSink_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000080; + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -4802,6 +6351,9 @@ public final class ClusterStatusProtos { if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) { coprocessors_ = java.util.Collections.unmodifiableList(coprocessors_); } + if (((mutable_bitField0_ & 0x00000200) == 0x00000200)) { + replLoadSource_ = java.util.Collections.unmodifiableList(replLoadSource_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } @@ -5143,6 +6695,104 @@ public final class ClusterStatusProtos { return infoServerPort_; } + // repeated .ReplicationLoadSource replLoadSource = 10; + public static final int REPLLOADSOURCE_FIELD_NUMBER = 10; + private java.util.List replLoadSource_; + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+     **
+     * The replicationLoadSource for the replication Source status of this region server.
+     * 
+ */ + public java.util.List getReplLoadSourceList() { + return replLoadSource_; + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+     **
+     * The replicationLoadSource for the replication Source status of this region server.
+     * 
+ */ + public java.util.List + getReplLoadSourceOrBuilderList() { + return replLoadSource_; + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+     **
+     * The replicationLoadSource for the replication Source status of this region server.
+     * 
+ */ + public int getReplLoadSourceCount() { + return replLoadSource_.size(); + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+     **
+     * The replicationLoadSource for the replication Source status of this region server.
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource getReplLoadSource(int index) { + return replLoadSource_.get(index); + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+     **
+     * The replicationLoadSource for the replication Source status of this region server.
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder getReplLoadSourceOrBuilder( + int index) { + return replLoadSource_.get(index); + } + + // optional .ReplicationLoadSink replLoadSink = 11; + public static final int REPLLOADSINK_FIELD_NUMBER = 11; + private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink replLoadSink_; + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
+     **
+     * The replicationLoadSink for the replication Sink status of this region server.
+     * 
+ */ + public boolean hasReplLoadSink() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
+     **
+     * The replicationLoadSink for the replication Sink status of this region server.
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink getReplLoadSink() { + return replLoadSink_; + } + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
+     **
+     * The replicationLoadSink for the replication Sink status of this region server.
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder getReplLoadSinkOrBuilder() { + return replLoadSink_; + } + private void initFields() { numberOfRequests_ = 0; totalNumberOfRequests_ = 0; @@ -5153,6 +6803,8 @@ public final class ClusterStatusProtos { reportStartTime_ = 0L; reportEndTime_ = 0L; infoServerPort_ = 0; + replLoadSource_ = java.util.Collections.emptyList(); + replLoadSink_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -5171,6 +6823,18 @@ public final class ClusterStatusProtos { return false; } } + for (int i = 0; i < getReplLoadSourceCount(); i++) { + if (!getReplLoadSource(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasReplLoadSink()) { + if (!getReplLoadSink().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -5205,6 +6869,12 @@ public final class ClusterStatusProtos { if (((bitField0_ & 0x00000040) == 0x00000040)) { output.writeUInt32(9, infoServerPort_); } + for (int i = 0; i < replLoadSource_.size(); i++) { + output.writeMessage(10, replLoadSource_.get(i)); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + output.writeMessage(11, replLoadSink_); + } getUnknownFields().writeTo(output); } @@ -5250,6 +6920,14 @@ public final class ClusterStatusProtos { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(9, infoServerPort_); } + for (int i = 0; i < replLoadSource_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(10, replLoadSource_.get(i)); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(11, replLoadSink_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -5312,6 +6990,13 @@ public final class ClusterStatusProtos { result = result && (getInfoServerPort() == other.getInfoServerPort()); } + result = result && getReplLoadSourceList() + .equals(other.getReplLoadSourceList()); + result = result && (hasReplLoadSink() == other.hasReplLoadSink()); + if (hasReplLoadSink()) { + result = result && getReplLoadSink() + .equals(other.getReplLoadSink()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -5361,6 +7046,14 @@ public final class ClusterStatusProtos { hash = (37 * hash) + INFO_SERVER_PORT_FIELD_NUMBER; hash = (53 * hash) + getInfoServerPort(); } + if (getReplLoadSourceCount() > 0) { + hash = (37 * hash) + REPLLOADSOURCE_FIELD_NUMBER; + hash = (53 * hash) + getReplLoadSourceList().hashCode(); + } + if (hasReplLoadSink()) { + hash = (37 * hash) + REPLLOADSINK_FIELD_NUMBER; + hash = (53 * hash) + getReplLoadSink().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -5464,6 +7157,8 @@ public final class ClusterStatusProtos { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getRegionLoadsFieldBuilder(); getCoprocessorsFieldBuilder(); + getReplLoadSourceFieldBuilder(); + getReplLoadSinkFieldBuilder(); } } private static Builder create() { @@ -5498,6 +7193,18 @@ public final class ClusterStatusProtos { bitField0_ = (bitField0_ & ~0x00000080); infoServerPort_ = 0; bitField0_ = (bitField0_ & ~0x00000100); + if (replLoadSourceBuilder_ == null) { + replLoadSource_ = 
java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000200); + } else { + replLoadSourceBuilder_.clear(); + } + if (replLoadSinkBuilder_ == null) { + replLoadSink_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance(); + } else { + replLoadSinkBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000400); return this; } @@ -5572,6 +7279,23 @@ public final class ClusterStatusProtos { to_bitField0_ |= 0x00000040; } result.infoServerPort_ = infoServerPort_; + if (replLoadSourceBuilder_ == null) { + if (((bitField0_ & 0x00000200) == 0x00000200)) { + replLoadSource_ = java.util.Collections.unmodifiableList(replLoadSource_); + bitField0_ = (bitField0_ & ~0x00000200); + } + result.replLoadSource_ = replLoadSource_; + } else { + result.replLoadSource_ = replLoadSourceBuilder_.build(); + } + if (((from_bitField0_ & 0x00000400) == 0x00000400)) { + to_bitField0_ |= 0x00000080; + } + if (replLoadSinkBuilder_ == null) { + result.replLoadSink_ = replLoadSink_; + } else { + result.replLoadSink_ = replLoadSinkBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -5661,6 +7385,35 @@ public final class ClusterStatusProtos { if (other.hasInfoServerPort()) { setInfoServerPort(other.getInfoServerPort()); } + if (replLoadSourceBuilder_ == null) { + if (!other.replLoadSource_.isEmpty()) { + if (replLoadSource_.isEmpty()) { + replLoadSource_ = other.replLoadSource_; + bitField0_ = (bitField0_ & ~0x00000200); + } else { + ensureReplLoadSourceIsMutable(); + replLoadSource_.addAll(other.replLoadSource_); + } + onChanged(); + } + } else { + if (!other.replLoadSource_.isEmpty()) { + if (replLoadSourceBuilder_.isEmpty()) { + replLoadSourceBuilder_.dispose(); + replLoadSourceBuilder_ = null; + replLoadSource_ = other.replLoadSource_; + bitField0_ = (bitField0_ & ~0x00000200); + replLoadSourceBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getReplLoadSourceFieldBuilder() : null; + } else { + replLoadSourceBuilder_.addAllMessages(other.replLoadSource_); + } + } + } + if (other.hasReplLoadSink()) { + mergeReplLoadSink(other.getReplLoadSink()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -5678,6 +7431,18 @@ public final class ClusterStatusProtos { return false; } } + for (int i = 0; i < getReplLoadSourceCount(); i++) { + if (!getReplLoadSource(i).isInitialized()) { + + return false; + } + } + if (hasReplLoadSink()) { + if (!getReplLoadSink().isInitialized()) { + + return false; + } + } return true; } @@ -6749,6 +8514,498 @@ public final class ClusterStatusProtos { return this; } + // repeated .ReplicationLoadSource replLoadSource = 10; + private java.util.List replLoadSource_ = + java.util.Collections.emptyList(); + private void ensureReplLoadSourceIsMutable() { + if (!((bitField0_ & 0x00000200) == 0x00000200)) { + replLoadSource_ = new java.util.ArrayList(replLoadSource_); + bitField0_ |= 0x00000200; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder> replLoadSourceBuilder_; + + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+       **
+       * The replicationLoadSource for the replication Source status of this region server.
+       * 
+ */ + public java.util.List getReplLoadSourceList() { + if (replLoadSourceBuilder_ == null) { + return java.util.Collections.unmodifiableList(replLoadSource_); + } else { + return replLoadSourceBuilder_.getMessageList(); + } + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+       **
+       * The replicationLoadSource for the replication Source status of this region server.
+       * 
+ */ + public int getReplLoadSourceCount() { + if (replLoadSourceBuilder_ == null) { + return replLoadSource_.size(); + } else { + return replLoadSourceBuilder_.getCount(); + } + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+       **
+       * The replicationLoadSource for the replication Source status of this region server.
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource getReplLoadSource(int index) { + if (replLoadSourceBuilder_ == null) { + return replLoadSource_.get(index); + } else { + return replLoadSourceBuilder_.getMessage(index); + } + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+       **
+       * The replicationLoadSource for the replication Source status of this region server.
+       * 
+ */ + public Builder setReplLoadSource( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource value) { + if (replLoadSourceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplLoadSourceIsMutable(); + replLoadSource_.set(index, value); + onChanged(); + } else { + replLoadSourceBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+       **
+       * The replicationLoadSource for the replication Source status of this region server.
+       * 
+ */ + public Builder setReplLoadSource( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder builderForValue) { + if (replLoadSourceBuilder_ == null) { + ensureReplLoadSourceIsMutable(); + replLoadSource_.set(index, builderForValue.build()); + onChanged(); + } else { + replLoadSourceBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+       **
+       * The replicationLoadSource for the replication Source status of this region server.
+       * 
+ */ + public Builder addReplLoadSource(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource value) { + if (replLoadSourceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplLoadSourceIsMutable(); + replLoadSource_.add(value); + onChanged(); + } else { + replLoadSourceBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+       **
+       * The replicationLoadSource for the replication Source status of this region server.
+       * 
+ */ + public Builder addReplLoadSource( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource value) { + if (replLoadSourceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplLoadSourceIsMutable(); + replLoadSource_.add(index, value); + onChanged(); + } else { + replLoadSourceBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+       **
+       * The replicationLoadSource for the replication Source status of this region server.
+       * 
+ */ + public Builder addReplLoadSource( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder builderForValue) { + if (replLoadSourceBuilder_ == null) { + ensureReplLoadSourceIsMutable(); + replLoadSource_.add(builderForValue.build()); + onChanged(); + } else { + replLoadSourceBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+       **
+       * The replicationLoadSource for the replication Source status of this region server.
+       * 
+ */ + public Builder addReplLoadSource( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder builderForValue) { + if (replLoadSourceBuilder_ == null) { + ensureReplLoadSourceIsMutable(); + replLoadSource_.add(index, builderForValue.build()); + onChanged(); + } else { + replLoadSourceBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+       **
+       * The replicationLoadSource for the replication Source status of this region server.
+       * 
+ */ + public Builder addAllReplLoadSource( + java.lang.Iterable values) { + if (replLoadSourceBuilder_ == null) { + ensureReplLoadSourceIsMutable(); + super.addAll(values, replLoadSource_); + onChanged(); + } else { + replLoadSourceBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+       **
+       * The replicationLoadSource for the replication Source status of this region server.
+       * 
+ */ + public Builder clearReplLoadSource() { + if (replLoadSourceBuilder_ == null) { + replLoadSource_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000200); + onChanged(); + } else { + replLoadSourceBuilder_.clear(); + } + return this; + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+       **
+       * The replicationLoadSource for the replication Source status of this region server.
+       * 
+ */ + public Builder removeReplLoadSource(int index) { + if (replLoadSourceBuilder_ == null) { + ensureReplLoadSourceIsMutable(); + replLoadSource_.remove(index); + onChanged(); + } else { + replLoadSourceBuilder_.remove(index); + } + return this; + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+       **
+       * The replicationLoadSource for the replication Source status of this region server.
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder getReplLoadSourceBuilder( + int index) { + return getReplLoadSourceFieldBuilder().getBuilder(index); + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+       **
+       * The replicationLoadSource for the replication Source status of this region server.
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder getReplLoadSourceOrBuilder( + int index) { + if (replLoadSourceBuilder_ == null) { + return replLoadSource_.get(index); } else { + return replLoadSourceBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+       **
+       * The replicationLoadSource for the replication Source status of this region server.
+       * 
+ */ + public java.util.List + getReplLoadSourceOrBuilderList() { + if (replLoadSourceBuilder_ != null) { + return replLoadSourceBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(replLoadSource_); + } + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+       **
+       * The replicationLoadSource for the replication Source status of this region server.
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder addReplLoadSourceBuilder() { + return getReplLoadSourceFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.getDefaultInstance()); + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+       **
+       * The replicationLoadSource for the replication Source status of this region server.
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder addReplLoadSourceBuilder( + int index) { + return getReplLoadSourceFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.getDefaultInstance()); + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+       **
+       * The replicationLoadSource for the replication Source status of this region server.
+       * 
+ */ + public java.util.List + getReplLoadSourceBuilderList() { + return getReplLoadSourceFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder> + getReplLoadSourceFieldBuilder() { + if (replLoadSourceBuilder_ == null) { + replLoadSourceBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder>( + replLoadSource_, + ((bitField0_ & 0x00000200) == 0x00000200), + getParentForChildren(), + isClean()); + replLoadSource_ = null; + } + return replLoadSourceBuilder_; + } + + // optional .ReplicationLoadSink replLoadSink = 11; + private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink replLoadSink_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder> replLoadSinkBuilder_; + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
+       **
+       * The replicationLoadSink for the replication Sink status of this region server.
+       * 
+ */ + public boolean hasReplLoadSink() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
+       **
+       * The replicationLoadSink for the replication Sink status of this region server.
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink getReplLoadSink() { + if (replLoadSinkBuilder_ == null) { + return replLoadSink_; + } else { + return replLoadSinkBuilder_.getMessage(); + } + } + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
+       **
+       * The replicationLoadSink for the replication Sink status of this region server.
+       * 
+ */ + public Builder setReplLoadSink(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink value) { + if (replLoadSinkBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + replLoadSink_ = value; + onChanged(); + } else { + replLoadSinkBuilder_.setMessage(value); + } + bitField0_ |= 0x00000400; + return this; + } + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
+       **
+       * The replicationLoadSink for the replication Sink status of this region server.
+       * 
+ */ + public Builder setReplLoadSink( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder builderForValue) { + if (replLoadSinkBuilder_ == null) { + replLoadSink_ = builderForValue.build(); + onChanged(); + } else { + replLoadSinkBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000400; + return this; + } + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
+       **
+       * The replicationLoadSink for the replication Sink status of this region server.
+       * 
+ */ + public Builder mergeReplLoadSink(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink value) { + if (replLoadSinkBuilder_ == null) { + if (((bitField0_ & 0x00000400) == 0x00000400) && + replLoadSink_ != org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance()) { + replLoadSink_ = + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.newBuilder(replLoadSink_).mergeFrom(value).buildPartial(); + } else { + replLoadSink_ = value; + } + onChanged(); + } else { + replLoadSinkBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000400; + return this; + } + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
+       **
+       * The replicationLoadSink for the replication Sink status of this region server.
+       * 
+ */ + public Builder clearReplLoadSink() { + if (replLoadSinkBuilder_ == null) { + replLoadSink_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance(); + onChanged(); + } else { + replLoadSinkBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000400); + return this; + } + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
+       **
+       * The replicationLoadSink for the replication Sink status of this region server.
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder getReplLoadSinkBuilder() { + bitField0_ |= 0x00000400; + onChanged(); + return getReplLoadSinkFieldBuilder().getBuilder(); + } + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
+       **
+       * The replicationLoadSink for the replication Sink status of this region server.
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder getReplLoadSinkOrBuilder() { + if (replLoadSinkBuilder_ != null) { + return replLoadSinkBuilder_.getMessageOrBuilder(); + } else { + return replLoadSink_; + } + } + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
+       **
+       * The replicationLoadSink for the replication Sink status of this region server.
+       * 
+ */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder> + getReplLoadSinkFieldBuilder() { + if (replLoadSinkBuilder_ == null) { + replLoadSinkBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder>( + replLoadSink_, + getParentForChildren(), + isClean()); + replLoadSink_ = null; + } + return replLoadSinkBuilder_; + } + // @@protoc_insertion_point(builder_scope:ServerLoad) } @@ -10526,6 +12783,16 @@ public final class ClusterStatusProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_RegionLoad_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ReplicationLoadSink_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ReplicationLoadSink_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ReplicationLoadSource_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ReplicationLoadSource_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_ServerLoad_descriptor; private static @@ -10575,26 +12842,34 @@ public final class ClusterStatusProtos { "(\r\022\"\n\032total_static_bloom_size_KB\030\016 \001(\r\022\034" + "\n\024complete_sequence_id\030\017 \001(\004\022\025\n\rdata_loc" + "ality\030\020 \001(\002\022#\n\030last_major_compaction_ts\030" + - "\021 \001(\004:\0010\"\212\002\n\nServerLoad\022\032\n\022number_of_req" + - "uests\030\001 \001(\r\022 \n\030total_number_of_requests\030" + - "\002 \001(\r\022\024\n\014used_heap_MB\030\003 \001(\r\022\023\n\013max_heap_" + - "MB\030\004 \001(\r\022!\n\014region_loads\030\005 \003(\0132\013.RegionL" + - "oad\022\"\n\014coprocessors\030\006 \003(\0132\014.Coprocessor\022", - "\031\n\021report_start_time\030\007 \001(\004\022\027\n\017report_end" + - "_time\030\010 \001(\004\022\030\n\020info_server_port\030\t \001(\r\"O\n" + - "\016LiveServerInfo\022\033\n\006server\030\001 \002(\0132\013.Server" + - "Name\022 \n\013server_load\030\002 \002(\0132\013.ServerLoad\"\340" + - "\002\n\rClusterStatus\022/\n\rhbase_version\030\001 \001(\0132" + - "\030.HBaseVersionFileContent\022%\n\014live_server" + - "s\030\002 \003(\0132\017.LiveServerInfo\022!\n\014dead_servers" + - "\030\003 \003(\0132\013.ServerName\0222\n\025regions_in_transi" + - "tion\030\004 \003(\0132\023.RegionInTransition\022\036\n\nclust" + - "er_id\030\005 \001(\0132\n.ClusterId\022)\n\023master_coproc", - "essors\030\006 \003(\0132\014.Coprocessor\022\033\n\006master\030\007 \001" + - "(\0132\013.ServerName\022#\n\016backup_masters\030\010 \003(\0132" + - "\013.ServerName\022\023\n\013balancer_on\030\t \001(\010BF\n*org" + - ".apache.hadoop.hbase.protobuf.generatedB" + - "\023ClusterStatusProtosH\001\240\001\001" + "\021 \001(\004:\0010\"T\n\023ReplicationLoadSink\022\032\n\022ageOf" + + "LastAppliedOp\030\001 \002(\004\022!\n\031timeStampsOfLastA" + + "ppliedOp\030\002 
\002(\004\"\225\001\n\025ReplicationLoadSource" + + "\022\016\n\006peerID\030\001 \002(\t\022\032\n\022ageOfLastShippedOp\030\002" + + " \002(\004\022\026\n\016sizeOfLogQueue\030\003 \002(\r\022 \n\030timeStam", + "pOfLastShippedOp\030\004 \002(\004\022\026\n\016replicationLag" + + "\030\005 \002(\004\"\346\002\n\nServerLoad\022\032\n\022number_of_reque" + + "sts\030\001 \001(\r\022 \n\030total_number_of_requests\030\002 " + + "\001(\r\022\024\n\014used_heap_MB\030\003 \001(\r\022\023\n\013max_heap_MB" + + "\030\004 \001(\r\022!\n\014region_loads\030\005 \003(\0132\013.RegionLoa" + + "d\022\"\n\014coprocessors\030\006 \003(\0132\014.Coprocessor\022\031\n" + + "\021report_start_time\030\007 \001(\004\022\027\n\017report_end_t" + + "ime\030\010 \001(\004\022\030\n\020info_server_port\030\t \001(\r\022.\n\016r" + + "eplLoadSource\030\n \003(\0132\026.ReplicationLoadSou" + + "rce\022*\n\014replLoadSink\030\013 \001(\0132\024.ReplicationL", + "oadSink\"O\n\016LiveServerInfo\022\033\n\006server\030\001 \002(" + + "\0132\013.ServerName\022 \n\013server_load\030\002 \002(\0132\013.Se" + + "rverLoad\"\340\002\n\rClusterStatus\022/\n\rhbase_vers" + + "ion\030\001 \001(\0132\030.HBaseVersionFileContent\022%\n\014l" + + "ive_servers\030\002 \003(\0132\017.LiveServerInfo\022!\n\014de" + + "ad_servers\030\003 \003(\0132\013.ServerName\0222\n\025regions" + + "_in_transition\030\004 \003(\0132\023.RegionInTransitio" + + "n\022\036\n\ncluster_id\030\005 \001(\0132\n.ClusterId\022)\n\023mas" + + "ter_coprocessors\030\006 \003(\0132\014.Coprocessor\022\033\n\006" + + "master\030\007 \001(\0132\013.ServerName\022#\n\016backup_mast", + "ers\030\010 \003(\0132\013.ServerName\022\023\n\013balancer_on\030\t " + + "\001(\010BF\n*org.apache.hadoop.hbase.protobuf." + + "generatedB\023ClusterStatusProtosH\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -10619,20 +12894,32 @@ public final class ClusterStatusProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionLoad_descriptor, new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", "LastMajorCompactionTs", }); - internal_static_ServerLoad_descriptor = + internal_static_ReplicationLoadSink_descriptor = getDescriptor().getMessageTypes().get(3); + internal_static_ReplicationLoadSink_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ReplicationLoadSink_descriptor, + new java.lang.String[] { "AgeOfLastAppliedOp", "TimeStampsOfLastAppliedOp", }); + internal_static_ReplicationLoadSource_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_ReplicationLoadSource_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ReplicationLoadSource_descriptor, + new java.lang.String[] { "PeerID", "AgeOfLastShippedOp", "SizeOfLogQueue", "TimeStampOfLastShippedOp", "ReplicationLag", }); + internal_static_ServerLoad_descriptor = + getDescriptor().getMessageTypes().get(5); internal_static_ServerLoad_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( 
internal_static_ServerLoad_descriptor, - new java.lang.String[] { "NumberOfRequests", "TotalNumberOfRequests", "UsedHeapMB", "MaxHeapMB", "RegionLoads", "Coprocessors", "ReportStartTime", "ReportEndTime", "InfoServerPort", }); + new java.lang.String[] { "NumberOfRequests", "TotalNumberOfRequests", "UsedHeapMB", "MaxHeapMB", "RegionLoads", "Coprocessors", "ReportStartTime", "ReportEndTime", "InfoServerPort", "ReplLoadSource", "ReplLoadSink", }); internal_static_LiveServerInfo_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(6); internal_static_LiveServerInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_LiveServerInfo_descriptor, new java.lang.String[] { "Server", "ServerLoad", }); internal_static_ClusterStatus_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(7); internal_static_ClusterStatus_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ClusterStatus_descriptor, diff --git a/hbase-protocol/src/main/protobuf/ClusterStatus.proto b/hbase-protocol/src/main/protobuf/ClusterStatus.proto index 2b2d9eb8ac0..bb531cc914e 100644 --- a/hbase-protocol/src/main/protobuf/ClusterStatus.proto +++ b/hbase-protocol/src/main/protobuf/ClusterStatus.proto @@ -119,6 +119,19 @@ message RegionLoad { /* Server-level protobufs */ +message ReplicationLoadSink { + required uint64 ageOfLastAppliedOp = 1; + required uint64 timeStampsOfLastAppliedOp = 2; +} + +message ReplicationLoadSource { + required string peerID = 1; + required uint64 ageOfLastShippedOp = 2; + required uint32 sizeOfLogQueue = 3; + required uint64 timeStampOfLastShippedOp = 4; + required uint64 replicationLag = 5; +} + message ServerLoad { /** Number of requests since last report. */ optional uint32 number_of_requests = 1; @@ -160,6 +173,16 @@ message ServerLoad { * The port number that this region server is hosing an info server on. */ optional uint32 info_server_port = 9; + + /** + * The replicationLoadSource for the replication Source status of this region server. + */ + repeated ReplicationLoadSource replLoadSource = 10; + + /** + * The replicationLoadSink for the replication Sink status of this region server. + */ + optional ReplicationLoadSink replLoadSink = 11; } message LiveServerInfo { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index c170a65df12..4574a01f94a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -130,6 +130,7 @@ import org.apache.hadoop.hbase.regionserver.handler.CloseMetaHandler; import org.apache.hadoop.hbase.regionserver.handler.CloseRegionHandler; import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; +import org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.trace.SpanReceiverHost; import org.apache.hadoop.hbase.util.Addressing; @@ -1147,6 +1148,22 @@ public class HRegionServer extends HasThread implements } else { serverLoad.setInfoServerPort(-1); } + + // for the replicationLoad purpose. 
It only needs to be fetched from one service; + // either source or sink will return the same information + ReplicationSourceService rsources = getReplicationSourceService(); + + if (rsources != null) { + // always refresh first to get the latest value + ReplicationLoad rLoad = rsources.refreshAndGetReplicationLoad(); + if (rLoad != null) { + serverLoad.setReplLoadSink(rLoad.getReplicationLoadSink()); + for (ClusterStatusProtos.ReplicationLoadSource rLS : rLoad.getReplicationLoadSourceList()) { + serverLoad.addReplLoadSource(rLS); + } + } + } + return serverLoad.build(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java index 92ac8236c72..25a27a90534 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java @@ -22,11 +22,12 @@ import java.io.IOException; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; /** - * Gateway to Cluster Replication. + * Gateway to Cluster Replication. * Used by {@link org.apache.hadoop.hbase.regionserver.HRegionServer}. * One such application is a cross-datacenter * replication service that can keep two hbase clusters in sync. @@ -52,4 +53,9 @@ public interface ReplicationService { * Stops replication service. */ void stopReplicationService(); + + /** + * Refresh and get the current ReplicationLoad + */ + public ReplicationLoad refreshAndGetReplicationLoad(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java index 0c9d0169124..37dc1dd4e4e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java @@ -71,4 +71,21 @@ public class MetricsSink { mss.incrAppliedOps(batchSize); } + /** + * Get the Age of Last Applied Op + * @return ageOfLastAppliedOp + */ + public long getAgeOfLastAppliedOp() { + return mss.getLastAppliedOpAge(); + } + + /** + * Get the TimeStampOfLastAppliedOp. 
If no replication Op has been applied yet, the value is the timestamp + * at which the hbase instance started + * @return timeStampOfLastAppliedOp + */ + public long getTimeStampOfLastAppliedOp() { + return this.lastTimestampForAge; + } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java index a734b9ce07f..21296a011e8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java @@ -36,6 +36,7 @@ public class MetricsSource { private long lastTimestamp = 0; private int lastQueueSize = 0; + private String id; private final MetricsReplicationSourceSource singleSourceSource; private final MetricsReplicationSourceSource globalSourceSource; @@ -46,6 +47,7 @@ public class MetricsSource { * @param id Name of the source this class is monitoring */ public MetricsSource(String id) { + this.id = id; singleSourceSource = CompatibilitySingletonFactory.getInstance(MetricsReplicationSourceFactory.class) .getSource(id); @@ -143,4 +145,36 @@ public class MetricsSource { globalSourceSource.decrSizeOfLogQueue(lastQueueSize); lastQueueSize = 0; } + + /** + * Get the ageOfLastShippedOp + * @return ageOfLastShippedOp + */ + public Long getAgeOfLastShippedOp() { + return singleSourceSource.getLastShippedAge(); + } + + /** + * Get the sizeOfLogQueue + * @return sizeOfLogQueue + */ + public int getSizeOfLogQueue() { + return this.lastQueueSize; + } + + /** + * Get the timeStampOfLastShippedOp + * @return lastTimestamp, the time stamp of the last shipped operation + */ + public long getTimeStampOfLastShippedOp() { + return lastTimestamp; + } + + /** + * Get the slave peer ID + * @return peerID + */ + public String getPeerID() { + return id; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index b30698caf78..5b0f469a04a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -23,6 +23,7 @@ import static org.apache.hadoop.hbase.HConstants.REPLICATION_ENABLE_KEY; import static org.apache.hadoop.hbase.HConstants.REPLICATION_SCOPE_LOCAL; import java.io.IOException; +import java.util.ArrayList; import java.util.List; import java.util.NavigableMap; import java.util.TreeMap; @@ -65,7 +66,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; * Gateway to Replication. Used by {@link org.apache.hadoop.hbase.regionserver.HRegionServer}. */ @InterfaceAudience.Private -public class Replication extends WALActionsListener.Base implements +public class Replication extends WALActionsListener.Base implements ReplicationSourceService, ReplicationSinkService { private static final Log LOG = LogFactory.getLog(Replication.class); @@ -81,6 +82,8 @@ public class Replication extends WALActionsListener.Base implements /** Statistics thread schedule pool */ private ScheduledExecutorService scheduleThreadPool; private int statsThreadPeriod; + // ReplicationLoad to access replication metrics + private ReplicationLoad replicationLoad; /** * Instantiate the replication management (if rep is enabled). 
@@ -137,11 +140,13 @@ public class Replication extends WALActionsListener.Base implements this.statsThreadPeriod = this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60); LOG.debug("ReplicationStatisticsThread " + this.statsThreadPeriod); + this.replicationLoad = new ReplicationLoad(); } else { this.replicationManager = null; this.replicationQueues = null; this.replicationPeers = null; this.replicationTracker = null; + this.replicationLoad = null; } } @@ -309,4 +314,29 @@ public class Replication extends WALActionsListener.Base implements } } } + + @Override + public ReplicationLoad refreshAndGetReplicationLoad() { + if (this.replicationLoad == null) { + return null; + } + // always build for latest data + buildReplicationLoad(); + return this.replicationLoad; + } + + private void buildReplicationLoad() { + // get source + List sources = this.replicationManager.getSources(); + List sourceMetricsList = new ArrayList(); + + for (ReplicationSourceInterface source : sources) { + if (source instanceof ReplicationSource) { + sourceMetricsList.add(((ReplicationSource) source).getSourceMetrics()); + } + } + // get sink + MetricsSink sinkMetrics = this.replicationSink.getSinkMetrics(); + this.replicationLoad.buildReplicationLoad(sourceMetricsList, sinkMetrics); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java new file mode 100644 index 00000000000..b3f3ecbcc0e --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java @@ -0,0 +1,151 @@ +/** + * Copyright 2014 The Apache Software Foundation Licensed to the Apache Software Foundation (ASF) + * under one or more contributor license agreements. See the NOTICE file distributed with this work + * for additional information regarding copyright ownership. The ASF licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in + * writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific + * language governing permissions and limitations under the License. + */ +package org.apache.hadoop.hbase.replication.regionserver; + +import java.util.Date; +import java.util.List; +import java.util.ArrayList; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.Strings; + +/** + * This class is used for exporting some of the info from replication metrics + */ +@InterfaceAudience.Private +public class ReplicationLoad { + + // Empty load instance. 
+ public static final ReplicationLoad EMPTY_REPLICATIONLOAD = new ReplicationLoad(); + + private List sourceMetricsList; + private MetricsSink sinkMetrics; + + private List replicationLoadSourceList; + private ClusterStatusProtos.ReplicationLoadSink replicationLoadSink; + + /** default constructor */ + public ReplicationLoad() { + super(); + } + + /** + * Build the ReplicationLoad from the given metrics. + * @param srMetricsList the list of source metrics + * @param skMetrics the sink metrics + */ + public void buildReplicationLoad(final List srMetricsList, + final MetricsSink skMetrics) { + this.sourceMetricsList = srMetricsList; + this.sinkMetrics = skMetrics; + + // build the SinkLoad + ClusterStatusProtos.ReplicationLoadSink.Builder rLoadSinkBuild = + ClusterStatusProtos.ReplicationLoadSink.newBuilder(); + rLoadSinkBuild.setAgeOfLastAppliedOp(sinkMetrics.getAgeOfLastAppliedOp()); + rLoadSinkBuild.setTimeStampsOfLastAppliedOp(sinkMetrics.getTimeStampOfLastAppliedOp()); + this.replicationLoadSink = rLoadSinkBuild.build(); + + // build the SourceLoad List + this.replicationLoadSourceList = new ArrayList(); + for (MetricsSource sm : this.sourceMetricsList) { + long ageOfLastShippedOp = sm.getAgeOfLastShippedOp(); + int sizeOfLogQueue = sm.getSizeOfLogQueue(); + long timeStampOfLastShippedOp = sm.getTimeStampOfLastShippedOp(); + long replicationLag; + long timePassedAfterLastShippedOp = + EnvironmentEdgeManager.currentTime() - timeStampOfLastShippedOp; + if (sizeOfLogQueue != 0) { + // err on the large side + replicationLag = Math.max(ageOfLastShippedOp, timePassedAfterLastShippedOp); + } else if (timePassedAfterLastShippedOp < 2 * ageOfLastShippedOp) { + replicationLag = ageOfLastShippedOp; // the last shipment happened recently + } else { + // the last shipment may have happened a long time ago, + // so there is no real lag although ageOfLastShippedOp is non-zero + replicationLag = 0; + } + + ClusterStatusProtos.ReplicationLoadSource.Builder rLoadSourceBuild = + ClusterStatusProtos.ReplicationLoadSource.newBuilder(); + rLoadSourceBuild.setPeerID(sm.getPeerID()); + rLoadSourceBuild.setAgeOfLastShippedOp(ageOfLastShippedOp); + rLoadSourceBuild.setSizeOfLogQueue(sizeOfLogQueue); + rLoadSourceBuild.setTimeStampOfLastShippedOp(timeStampOfLastShippedOp); + rLoadSourceBuild.setReplicationLag(replicationLag); + + this.replicationLoadSourceList.add(rLoadSourceBuild.build()); + } + + } + + /** + * sourceToString + * @return a string containing the source replication load information + */ + public String sourceToString() { + if (this.sourceMetricsList == null) return null; + + StringBuilder sb = new StringBuilder(); + + for (ClusterStatusProtos.ReplicationLoadSource rls : this.replicationLoadSourceList) { + + sb = Strings.appendKeyValue(sb, "\n PeerID", rls.getPeerID()); + sb = Strings.appendKeyValue(sb, "AgeOfLastShippedOp", rls.getAgeOfLastShippedOp()); + sb = Strings.appendKeyValue(sb, "SizeOfLogQueue", rls.getSizeOfLogQueue()); + sb = + Strings.appendKeyValue(sb, "TimeStampsOfLastShippedOp", + (new Date(rls.getTimeStampOfLastShippedOp()).toString())); + sb = Strings.appendKeyValue(sb, "Replication Lag", rls.getReplicationLag()); + } + + return sb.toString(); + } + + /** + * sinkToString + * @return a string containing the sink replication load information + */ + public String sinkToString() { + if (this.replicationLoadSink == null) return null; + + StringBuilder sb = new StringBuilder(); + sb = + Strings.appendKeyValue(sb, "AgeOfLastAppliedOp", + this.replicationLoadSink.getAgeOfLastAppliedOp()); + sb = + Strings.appendKeyValue(sb, "TimeStampsOfLastAppliedOp", + (new 
Date(this.replicationLoadSink.getTimeStampsOfLastAppliedOp()).toString())); + + return sb.toString(); + } + + public ClusterStatusProtos.ReplicationLoadSink getReplicationLoadSink() { + return this.replicationLoadSink; + } + + public List getReplicationLoadSourceList() { + return this.replicationLoadSourceList; + } + + /** + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + return this.sourceToString() + System.getProperty("line.separator") + this.sinkToString(); + } + +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java index 9a6013188da..32764180aa4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java @@ -254,4 +254,12 @@ public class ReplicationSink { "age in ms of last applied edit: " + this.metrics.refreshAgeOfLastAppliedOp() + ", total replicated edits: " + this.totalReplicatedEdits; } + + /** + * Get replication Sink Metrics + * @return MetricsSink + */ + public MetricsSink getSinkMetrics() { + return this.metrics; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index ee43956f482..714080fee68 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -869,4 +869,12 @@ public class ReplicationSource extends Thread ", currently replicating from: " + this.currentPath + " at position: " + position; } + + /** + * Get Replication Source Metrics + * @return sourceMetrics + */ + public MetricsSource getSourceMetrics() { + return this.metrics; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java index f0db865bad2..2dc3c896559 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java @@ -31,11 +31,15 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.ServerLoad; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HBaseAdmin; @@ -556,4 +560,45 @@ public class TestReplicationSmallTests extends TestReplicationBase { hadmin.close(); } + /** + * Test for HBASE-9531 + * put a few rows into htable1, which should be replicated to htable2 + * create a ClusterStatus instance 'status' from HBaseAdmin + * test : 
status.getLoad(server).getReplicationLoadSourceList() + * test : status.getLoad(server).getReplicationLoadSink() + * @throws Exception + */ + @Test(timeout = 300000) + public void testReplicationStatus() throws Exception { + LOG.info("testReplicationStatus"); + + try (Admin admin = utility1.getConnection().getAdmin()) { + + final byte[] qualName = Bytes.toBytes("q"); + Put p; + + for (int i = 0; i < NB_ROWS_IN_BATCH; i++) { + p = new Put(Bytes.toBytes("row" + i)); + p.add(famName, qualName, Bytes.toBytes("val" + i)); + htable1.put(p); + } + + ClusterStatus status = admin.getClusterStatus(); + + for (ServerName server : status.getServers()) { + ServerLoad sl = status.getLoad(server); + List rLoadSourceList = sl.getReplicationLoadSourceList(); + ReplicationLoadSink rLoadSink = sl.getReplicationLoadSink(); + + // check SourceList has at least one entry + assertTrue("failed to get ReplicationLoadSourceList", (rLoadSourceList.size() > 0)); + + // only check that the Sink values exist, as it is difficult to verify them on the fly + assertTrue("failed to get ReplicationLoadSink.AgeOfLastAppliedOp", + (rLoadSink.getAgeOfLastAppliedOp() >= 0)); + assertTrue("failed to get ReplicationLoadSink.TimeStampsOfLastAppliedOp", + (rLoadSink.getTimeStampsOfLastAppliedOp() >= 0)); + } + } + } } diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index c0ea862561a..35ee36cd760 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -608,7 +608,7 @@ module Hbase end end - def status(format) + def status(format, type) status = @admin.getClusterStatus() if format == "detailed" puts("version %s" % [ status.getHBaseVersion() ]) @@ -635,6 +635,46 @@ module Hbase for server in status.getDeadServerNames() puts(" %s" % [ server ]) end + elsif format == "replication" + # check whether replication is enabled or not + if (!@admin.getConfiguration().getBoolean(org.apache.hadoop.hbase.HConstants::REPLICATION_ENABLE_KEY, + org.apache.hadoop.hbase.HConstants::REPLICATION_ENABLE_DEFAULT)) + puts("Please enable replication first.") + else + puts("version %s" % [ status.getHBaseVersion() ]) + puts("%d live servers" % [ status.getServersSize() ]) + for server in status.getServers() + sl = status.getLoad(server) + rSinkString = " SINK :" + rSourceString = " SOURCE:" + rLoadSink = sl.getReplicationLoadSink() + rSinkString << " AgeOfLastAppliedOp=" + rLoadSink.getAgeOfLastAppliedOp().to_s + rSinkString << ", TimeStampsOfLastAppliedOp=" + + (java.util.Date.new(rLoadSink.getTimeStampsOfLastAppliedOp())).toString() + rLoadSourceList = sl.getReplicationLoadSourceList() + index = 0 + while index < rLoadSourceList.size() + rLoadSource = rLoadSourceList.get(index) + rSourceString << " PeerID=" + rLoadSource.getPeerID() + rSourceString << ", AgeOfLastShippedOp=" + rLoadSource.getAgeOfLastShippedOp().to_s + rSourceString << ", SizeOfLogQueue=" + rLoadSource.getSizeOfLogQueue().to_s + rSourceString << ", TimeStampsOfLastShippedOp=" + + (java.util.Date.new(rLoadSource.getTimeStampOfLastShippedOp())).toString() + rSourceString << ", Replication Lag=" + rLoadSource.getReplicationLag().to_s + index = index + 1 + end + puts(" %s:" % + [ server.getHostname() ]) + if type.casecmp("SOURCE") == 0 + puts("%s" % rSourceString) + elsif type.casecmp("SINK") == 0 + puts("%s" % rSinkString) + else + puts("%s" % rSourceString) + puts("%s" % rSinkString) + end + end + end elsif format == "simple" load = 0 regions = 0 diff --git 
a/hbase-shell/src/main/ruby/shell/commands/status.rb b/hbase-shell/src/main/ruby/shell/commands/status.rb index f72c13caef6..b22b2723987 100644 --- a/hbase-shell/src/main/ruby/shell/commands/status.rb +++ b/hbase-shell/src/main/ruby/shell/commands/status.rb @@ -22,18 +22,21 @@ module Shell class Status < Command def help return <<-EOF -Show cluster status. Can be 'summary', 'simple', or 'detailed'. The +Show cluster status. Can be 'summary', 'simple', 'detailed', or 'replication'. The default is 'summary'. Examples: hbase> status hbase> status 'simple' hbase> status 'summary' hbase> status 'detailed' + hbase> status 'replication' + hbase> status 'replication', 'source' + hbase> status 'replication', 'sink' EOF end - def command(format = 'summary') - admin.status(format) + def command(format = 'summary', type = 'both') + admin.status(format, type) end end end diff --git a/hbase-shell/src/test/ruby/hbase/admin_test.rb b/hbase-shell/src/test/ruby/hbase/admin_test.rb index caede3ad9f6..19258649c79 100644 --- a/hbase-shell/src/test/ruby/hbase/admin_test.rb +++ b/hbase-shell/src/test/ruby/hbase/admin_test.rb @@ -356,5 +356,17 @@ module Hbase assert_not_equal(nil, table) table.close end + + define_test "Get replication status" do + replication_status("replication", "both") + end + + define_test "Get replication source metrics information" do + replication_status("replication", "source") + end + + define_test "Get replication sink metrics information" do + replication_status("replication", "sink") + end end end diff --git a/hbase-shell/src/test/ruby/test_helper.rb b/hbase-shell/src/test/ruby/test_helper.rb index 55797610614..5dfafc5657a 100644 --- a/hbase-shell/src/test/ruby/test_helper.rb +++ b/hbase-shell/src/test/ruby/test_helper.rb @@ -94,6 +94,10 @@ module Hbase puts "IGNORING DROP TABLE ERROR: #{e}" end end + + def replication_status(format, type) + return admin.status(format, type) + end end end
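
Reviewer note on the replicationLag heuristic in ReplicationLoad.buildReplicationLoad above: if WALs are still queued, the reported lag errs on the large side (the maximum of the last shipped edit's age and the time elapsed since it shipped); if the queue is empty and the last shipment was recent, the shipped edit's age is the best estimate; if the queue is empty and nothing has shipped for a long time, the source is simply idle and the lag is reported as zero. A self-contained sketch of the same three cases; the class and method names here are illustrative only, not part of the patch:

// Standalone restatement of the lag heuristic from ReplicationLoad.buildReplicationLoad.
public final class ReplicationLagExample {

  static long computeReplicationLag(long ageOfLastShippedOp, int sizeOfLogQueue,
      long timeStampOfLastShippedOp, long now) {
    long timePassedAfterLastShippedOp = now - timeStampOfLastShippedOp;
    if (sizeOfLogQueue != 0) {
      // WALs are still queued: err on the large side.
      return Math.max(ageOfLastShippedOp, timePassedAfterLastShippedOp);
    } else if (timePassedAfterLastShippedOp < 2 * ageOfLastShippedOp) {
      // The last shipment happened recently; its age is the best lag estimate.
      return ageOfLastShippedOp;
    } else {
      // The queue is empty and the last shipment is old: the source is idle,
      // so there is no real lag although ageOfLastShippedOp is non-zero.
      return 0;
    }
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    // Queue non-empty: max(5000, 8000) = 8000 ms.
    System.out.println(computeReplicationLag(5000, 3, now - 8000, now));
    // Queue empty, shipped 6000 ms ago (< 2 * 5000): lag = 5000 ms.
    System.out.println(computeReplicationLag(5000, 0, now - 6000, now));
    // Queue empty, shipped an hour ago: idle, lag = 0.
    System.out.println(computeReplicationLag(5000, 0, now - 3600000, now));
  }
}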
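
For completeness, a minimal client-side sketch of how the new metrics are consumed, mirroring testReplicationStatus above. The ReplicationStatusClient class name and the connection boilerplate are assumptions for illustration; the ServerLoad getReplicationLoadSourceList and getReplicationLoadSink accessors are the surface added by this patch (note that getReplicationLoadSink may return null when the server reported no sink load):

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
import org.apache.hadoop.hbase.replication.ReplicationLoadSource;

public final class ReplicationStatusClient {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes cluster config on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      ClusterStatus status = admin.getClusterStatus();
      for (ServerName server : status.getServers()) {
        ServerLoad sl = status.getLoad(server);
        // Source side: one entry per replication source, i.e. per slave peer.
        List<ReplicationLoadSource> sources = sl.getReplicationLoadSourceList();
        for (ReplicationLoadSource rls : sources) {
          System.out.println(server.getHostname() + " SOURCE: PeerID=" + rls.getPeerID()
              + ", SizeOfLogQueue=" + rls.getSizeOfLogQueue()
              + ", ReplicationLag=" + rls.getReplicationLag());
        }
        // Sink side: a single aggregate entry, absent if nothing was reported.
        ReplicationLoadSink sink = sl.getReplicationLoadSink();
        if (sink != null) {
          System.out.println(server.getHostname() + " SINK: AgeOfLastAppliedOp="
              + sink.getAgeOfLastAppliedOp());
        }
      }
    }
  }
}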
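
Based on the format strings added to admin.rb above, the shell output of status 'replication' would look roughly like this (version, hostnames, timestamps, and metric values are illustrative placeholders):

hbase> status 'replication'
version 1.1.0
2 live servers
 rs1.example.com:
 SOURCE: PeerID=1, AgeOfLastShippedOp=0, SizeOfLogQueue=0, TimeStampsOfLastShippedOp=Wed Jan 28 10:15:00 UTC 2015, Replication Lag=0
 SINK : AgeOfLastAppliedOp=0, TimeStampsOfLastAppliedOp=Wed Jan 28 10:15:00 UTC 2015
 rs2.example.com:
 SOURCE: PeerID=1, AgeOfLastShippedOp=0, SizeOfLogQueue=0, TimeStampsOfLastShippedOp=Wed Jan 28 10:15:00 UTC 2015, Replication Lag=0
 SINK : AgeOfLastAppliedOp=0, TimeStampsOfLastAppliedOp=Wed Jan 28 10:15:00 UTC 2015

Passing 'source' or 'sink' as the second argument restricts the output to the corresponding line per server.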