From b936444e34871f81d2cc8d0cea5898d3e3f683df Mon Sep 17 00:00:00 2001 From: Devaraj Das Date: Tue, 27 Aug 2013 22:12:21 +0000 Subject: [PATCH] HBASE-9116. Adds a view/edit tool for favored nodes mapping. Also implements the FavoredNodeLoadBalancer.balanceCluster method. git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1518016 13f79535-47bb-0310-9956-ffa450edef68 --- dev-support/findbugs-exclude.xml | 8 + .../hbase/protobuf/RequestConverter.java | 23 +- .../src/main/resources/hbase-default.xml | 4 + .../hbase/protobuf/generated/AdminProtos.java | 2311 ++++++++++++++++- hbase-protocol/src/main/protobuf/Admin.proto | 16 + .../hbase/master/AssignmentManager.java | 26 +- .../master/AssignmentVerificationReport.java | 598 +++++ .../master/RegionPlacementMaintainer.java | 1114 ++++++++ .../SnapshotOfRegionAssignmentFromMeta.java | 217 ++ .../balancer/FavoredNodeAssignmentHelper.java | 254 +- .../balancer/FavoredNodeLoadBalancer.java | 106 +- .../master/balancer/FavoredNodesPlan.java | 155 ++ .../hbase/regionserver/HRegionServer.java | 16 + .../hadoop/hbase/util/FSRegionScanner.java | 169 ++ .../org/apache/hadoop/hbase/util/FSUtils.java | 184 ++ .../hadoop/hbase/util/MunkresAssignment.java | 516 ++++ .../hadoop/hbase/master/MockRegionServer.java | 8 + .../hbase/master/TestRegionPlacement.java | 277 +- 18 files changed, 5845 insertions(+), 157 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodesPlan.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java diff --git a/dev-support/findbugs-exclude.xml b/dev-support/findbugs-exclude.xml index da0abc92541..67e633ef685 100644 --- a/dev-support/findbugs-exclude.xml +++ b/dev-support/findbugs-exclude.xml @@ -241,4 +241,12 @@ + + + + + + diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index b97d7916fd3..6b3293cec95 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -55,6 +55,8 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest. 
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath; @@ -714,6 +716,25 @@ public final class RequestConverter { return builder.build(); } + /** + * Create a protocol buffer UpdateFavoredNodesRequest to update a list of favored node mappings + * @param updateRegionInfos + * @return a protocol buffer UpdateFavoredNodesRequest + */ + public static UpdateFavoredNodesRequest buildUpdateFavoredNodesRequest( + final List<Pair<HRegionInfo, List<ServerName>>> updateRegionInfos) { + UpdateFavoredNodesRequest.Builder ubuilder = UpdateFavoredNodesRequest.newBuilder(); + for (Pair<HRegionInfo, List<ServerName>> pair : updateRegionInfos) { + RegionUpdateInfo.Builder builder = RegionUpdateInfo.newBuilder(); + builder.setRegion(HRegionInfo.convert(pair.getFirst())); + for (ServerName server : pair.getSecond()) { + builder.addFavoredNodes(ProtobufUtil.toServerName(server)); + } + ubuilder.addUpdateInfo(builder.build()); + } + return ubuilder.build(); + } + /** + * Create a CloseRegionRequest for a given region name + * @@ -1441,4 +1462,4 @@ public final class RequestConverter { } return builder.build(); } -} \ No newline at end of file +} diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml index e0b30628dd3..ad13e3d99a6 100644 --- a/hbase-common/src/main/resources/hbase-default.xml +++ b/hbase-common/src/main/resources/hbase-default.xml @@ -466,6 +466,10 @@ possible configurations would overwhelm and obscure the important. 60000 Client scanner lease period in milliseconds.
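As an aside, a minimal sketch of how a caller might use the new buildUpdateFavoredNodesRequest helper above; this snippet is illustrative only and is not part of the patch. The regionInfo variable and the primary, secondary and tertiary variables are assumed to be an HRegionInfo and three ServerNames already in hand.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
import org.apache.hadoop.hbase.util.Pair;

// Build an update for a single region and its three favored nodes.
List<Pair<HRegionInfo, List<ServerName>>> updates =
    new ArrayList<Pair<HRegionInfo, List<ServerName>>>();
updates.add(new Pair<HRegionInfo, List<ServerName>>(
    regionInfo, Arrays.asList(primary, secondary, tertiary)));
UpdateFavoredNodesRequest request =
    RequestConverter.buildUpdateFavoredNodesRequest(updates);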
+ + hbase.client.localityCheck.threadPoolSize + 2 + diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java index 33b7e05f488..3390e1e387e 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java @@ -10904,6 +10904,2102 @@ public final class AdminProtos { // @@protoc_insertion_point(class_scope:CompactRegionResponse) } + public interface UpdateFavoredNodesRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + java.util.List + getUpdateInfoList(); + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo getUpdateInfo(int index); + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + int getUpdateInfoCount(); + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + java.util.List + getUpdateInfoOrBuilderList(); + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfoOrBuilder getUpdateInfoOrBuilder( + int index); + } + /** + * Protobuf type {@code UpdateFavoredNodesRequest} + */ + public static final class UpdateFavoredNodesRequest extends + com.google.protobuf.GeneratedMessage + implements UpdateFavoredNodesRequestOrBuilder { + // Use UpdateFavoredNodesRequest.newBuilder() to construct. 
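As a quick orientation to the generated UpdateFavoredNodesRequest message being added here: it carries a repeated list of RegionUpdateInfo entries, each pairing a RegionInfo with its favored-node ServerNames. A minimal sketch of walking a received request with the generated accessors; the request variable is assumed, and the snippet is illustrative rather than part of the patch.

import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

// Iterate every (region, favored nodes) entry carried by the request.
for (RegionUpdateInfo info : request.getUpdateInfoList()) {
  HBaseProtos.RegionInfo region = info.getRegion();
  for (HBaseProtos.ServerName favoredNode : info.getFavoredNodesList()) {
    // e.g. record that this region should favor favoredNode for its replicas
  }
}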
+ private UpdateFavoredNodesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private UpdateFavoredNodesRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final UpdateFavoredNodesRequest defaultInstance; + public static UpdateFavoredNodesRequest getDefaultInstance() { + return defaultInstance; + } + + public UpdateFavoredNodesRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private UpdateFavoredNodesRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + updateInfo_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + updateInfo_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + updateInfo_ = java.util.Collections.unmodifiableList(updateInfo_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_UpdateFavoredNodesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_UpdateFavoredNodesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public UpdateFavoredNodesRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new UpdateFavoredNodesRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public interface RegionUpdateInfoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .RegionInfo region = 1; + /** + * required .RegionInfo region = 1; + */ + 
boolean hasRegion(); + /** + * required .RegionInfo region = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion(); + /** + * required .RegionInfo region = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder(); + + // repeated .ServerName favored_nodes = 2; + /** + * repeated .ServerName favored_nodes = 2; + */ + java.util.List + getFavoredNodesList(); + /** + * repeated .ServerName favored_nodes = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getFavoredNodes(int index); + /** + * repeated .ServerName favored_nodes = 2; + */ + int getFavoredNodesCount(); + /** + * repeated .ServerName favored_nodes = 2; + */ + java.util.List + getFavoredNodesOrBuilderList(); + /** + * repeated .ServerName favored_nodes = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getFavoredNodesOrBuilder( + int index); + } + /** + * Protobuf type {@code UpdateFavoredNodesRequest.RegionUpdateInfo} + */ + public static final class RegionUpdateInfo extends + com.google.protobuf.GeneratedMessage + implements RegionUpdateInfoOrBuilder { + // Use RegionUpdateInfo.newBuilder() to construct. + private RegionUpdateInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RegionUpdateInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RegionUpdateInfo defaultInstance; + public static RegionUpdateInfo getDefaultInstance() { + return defaultInstance; + } + + public RegionUpdateInfo getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RegionUpdateInfo( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = region_.toBuilder(); + } + region_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(region_); + region_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + favoredNodes_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + favoredNodes_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + 
e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + favoredNodes_ = java.util.Collections.unmodifiableList(favoredNodes_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_UpdateFavoredNodesRequest_RegionUpdateInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_UpdateFavoredNodesRequest_RegionUpdateInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RegionUpdateInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RegionUpdateInfo(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .RegionInfo region = 1; + public static final int REGION_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo region_; + /** + * required .RegionInfo region = 1; + */ + public boolean hasRegion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .RegionInfo region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion() { + return region_; + } + /** + * required .RegionInfo region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder() { + return region_; + } + + // repeated .ServerName favored_nodes = 2; + public static final int FAVORED_NODES_FIELD_NUMBER = 2; + private java.util.List favoredNodes_; + /** + * repeated .ServerName favored_nodes = 2; + */ + public java.util.List getFavoredNodesList() { + return favoredNodes_; + } + /** + * repeated .ServerName favored_nodes = 2; + */ + public java.util.List + getFavoredNodesOrBuilderList() { + return favoredNodes_; + } + /** + * repeated .ServerName favored_nodes = 2; + */ + public int getFavoredNodesCount() { + return favoredNodes_.size(); + } + /** + * repeated .ServerName favored_nodes = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getFavoredNodes(int index) { + return favoredNodes_.get(index); + } + /** + * repeated .ServerName favored_nodes = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getFavoredNodesOrBuilder( + int index) { + return favoredNodes_.get(index); + } + + private void initFields() { + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + favoredNodes_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRegion()) { + memoizedIsInitialized = 0; 
+ return false; + } + if (!getRegion().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getFavoredNodesCount(); i++) { + if (!getFavoredNodes(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, region_); + } + for (int i = 0; i < favoredNodes_.size(); i++) { + output.writeMessage(2, favoredNodes_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, region_); + } + for (int i = 0; i < favoredNodes_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, favoredNodes_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo) obj; + + boolean result = true; + result = result && (hasRegion() == other.hasRegion()); + if (hasRegion()) { + result = result && getRegion() + .equals(other.getRegion()); + } + result = result && getFavoredNodesList() + .equals(other.getFavoredNodesList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRegion()) { + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + } + if (getFavoredNodesCount() > 0) { + hash = (37 * hash) + FAVORED_NODES_FIELD_NUMBER; + hash = (53 * hash) + getFavoredNodesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code UpdateFavoredNodesRequest.RegionUpdateInfo} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_UpdateFavoredNodesRequest_RegionUpdateInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_UpdateFavoredNodesRequest_RegionUpdateInfo_fieldAccessorTable + 
.ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRegionFieldBuilder(); + getFavoredNodesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (regionBuilder_ == null) { + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + } else { + regionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (favoredNodesBuilder_ == null) { + favoredNodes_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + favoredNodesBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_UpdateFavoredNodesRequest_RegionUpdateInfo_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo build() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (regionBuilder_ == null) { + result.region_ = region_; + } else { + result.region_ = regionBuilder_.build(); + } + if (favoredNodesBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + favoredNodes_ = java.util.Collections.unmodifiableList(favoredNodes_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.favoredNodes_ = favoredNodes_; + } else { + result.favoredNodes_ = favoredNodesBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo)other); 
+ } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo.getDefaultInstance()) return this; + if (other.hasRegion()) { + mergeRegion(other.getRegion()); + } + if (favoredNodesBuilder_ == null) { + if (!other.favoredNodes_.isEmpty()) { + if (favoredNodes_.isEmpty()) { + favoredNodes_ = other.favoredNodes_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureFavoredNodesIsMutable(); + favoredNodes_.addAll(other.favoredNodes_); + } + onChanged(); + } + } else { + if (!other.favoredNodes_.isEmpty()) { + if (favoredNodesBuilder_.isEmpty()) { + favoredNodesBuilder_.dispose(); + favoredNodesBuilder_ = null; + favoredNodes_ = other.favoredNodes_; + bitField0_ = (bitField0_ & ~0x00000002); + favoredNodesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getFavoredNodesFieldBuilder() : null; + } else { + favoredNodesBuilder_.addAllMessages(other.favoredNodes_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRegion()) { + + return false; + } + if (!getRegion().isInitialized()) { + + return false; + } + for (int i = 0; i < getFavoredNodesCount(); i++) { + if (!getFavoredNodes(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .RegionInfo region = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionBuilder_; + /** + * required .RegionInfo region = 1; + */ + public boolean hasRegion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .RegionInfo region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion() { + if (regionBuilder_ == null) { + return region_; + } else { + return regionBuilder_.getMessage(); + } + } + /** + * required .RegionInfo region = 1; + */ + public Builder setRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + region_ = value; + onChanged(); + } else { + regionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .RegionInfo region = 1; + */ + 
public Builder setRegion( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionBuilder_ == null) { + region_ = builderForValue.build(); + onChanged(); + } else { + regionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .RegionInfo region = 1; + */ + public Builder mergeRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + region_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) { + region_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(region_).mergeFrom(value).buildPartial(); + } else { + region_ = value; + } + onChanged(); + } else { + regionBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .RegionInfo region = 1; + */ + public Builder clearRegion() { + if (regionBuilder_ == null) { + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + onChanged(); + } else { + regionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .RegionInfo region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRegionFieldBuilder().getBuilder(); + } + /** + * required .RegionInfo region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder() { + if (regionBuilder_ != null) { + return regionBuilder_.getMessageOrBuilder(); + } else { + return region_; + } + } + /** + * required .RegionInfo region = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> + getRegionFieldBuilder() { + if (regionBuilder_ == null) { + regionBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( + region_, + getParentForChildren(), + isClean()); + region_ = null; + } + return regionBuilder_; + } + + // repeated .ServerName favored_nodes = 2; + private java.util.List favoredNodes_ = + java.util.Collections.emptyList(); + private void ensureFavoredNodesIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + favoredNodes_ = new java.util.ArrayList(favoredNodes_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> favoredNodesBuilder_; + + /** + * repeated .ServerName favored_nodes = 2; + */ + public java.util.List getFavoredNodesList() { + if (favoredNodesBuilder_ == null) { + return java.util.Collections.unmodifiableList(favoredNodes_); + } else { + return favoredNodesBuilder_.getMessageList(); + } + } + /** + * repeated .ServerName favored_nodes = 2; + */ + public int getFavoredNodesCount() { + if 
(favoredNodesBuilder_ == null) { + return favoredNodes_.size(); + } else { + return favoredNodesBuilder_.getCount(); + } + } + /** + * repeated .ServerName favored_nodes = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getFavoredNodes(int index) { + if (favoredNodesBuilder_ == null) { + return favoredNodes_.get(index); + } else { + return favoredNodesBuilder_.getMessage(index); + } + } + /** + * repeated .ServerName favored_nodes = 2; + */ + public Builder setFavoredNodes( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (favoredNodesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFavoredNodesIsMutable(); + favoredNodes_.set(index, value); + onChanged(); + } else { + favoredNodesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .ServerName favored_nodes = 2; + */ + public Builder setFavoredNodes( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (favoredNodesBuilder_ == null) { + ensureFavoredNodesIsMutable(); + favoredNodes_.set(index, builderForValue.build()); + onChanged(); + } else { + favoredNodesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .ServerName favored_nodes = 2; + */ + public Builder addFavoredNodes(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (favoredNodesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFavoredNodesIsMutable(); + favoredNodes_.add(value); + onChanged(); + } else { + favoredNodesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .ServerName favored_nodes = 2; + */ + public Builder addFavoredNodes( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (favoredNodesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFavoredNodesIsMutable(); + favoredNodes_.add(index, value); + onChanged(); + } else { + favoredNodesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .ServerName favored_nodes = 2; + */ + public Builder addFavoredNodes( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (favoredNodesBuilder_ == null) { + ensureFavoredNodesIsMutable(); + favoredNodes_.add(builderForValue.build()); + onChanged(); + } else { + favoredNodesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .ServerName favored_nodes = 2; + */ + public Builder addFavoredNodes( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (favoredNodesBuilder_ == null) { + ensureFavoredNodesIsMutable(); + favoredNodes_.add(index, builderForValue.build()); + onChanged(); + } else { + favoredNodesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .ServerName favored_nodes = 2; + */ + public Builder addAllFavoredNodes( + java.lang.Iterable values) { + if (favoredNodesBuilder_ == null) { + ensureFavoredNodesIsMutable(); + super.addAll(values, favoredNodes_); + onChanged(); + } else { + favoredNodesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .ServerName favored_nodes = 2; + */ + public Builder clearFavoredNodes() { + if (favoredNodesBuilder_ == null) { + favoredNodes_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & 
~0x00000002); + onChanged(); + } else { + favoredNodesBuilder_.clear(); + } + return this; + } + /** + * repeated .ServerName favored_nodes = 2; + */ + public Builder removeFavoredNodes(int index) { + if (favoredNodesBuilder_ == null) { + ensureFavoredNodesIsMutable(); + favoredNodes_.remove(index); + onChanged(); + } else { + favoredNodesBuilder_.remove(index); + } + return this; + } + /** + * repeated .ServerName favored_nodes = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getFavoredNodesBuilder( + int index) { + return getFavoredNodesFieldBuilder().getBuilder(index); + } + /** + * repeated .ServerName favored_nodes = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getFavoredNodesOrBuilder( + int index) { + if (favoredNodesBuilder_ == null) { + return favoredNodes_.get(index); } else { + return favoredNodesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .ServerName favored_nodes = 2; + */ + public java.util.List + getFavoredNodesOrBuilderList() { + if (favoredNodesBuilder_ != null) { + return favoredNodesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(favoredNodes_); + } + } + /** + * repeated .ServerName favored_nodes = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addFavoredNodesBuilder() { + return getFavoredNodesFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); + } + /** + * repeated .ServerName favored_nodes = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addFavoredNodesBuilder( + int index) { + return getFavoredNodesFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); + } + /** + * repeated .ServerName favored_nodes = 2; + */ + public java.util.List + getFavoredNodesBuilderList() { + return getFavoredNodesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getFavoredNodesFieldBuilder() { + if (favoredNodesBuilder_ == null) { + favoredNodesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + favoredNodes_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + favoredNodes_ = null; + } + return favoredNodesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:UpdateFavoredNodesRequest.RegionUpdateInfo) + } + + static { + defaultInstance = new RegionUpdateInfo(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:UpdateFavoredNodesRequest.RegionUpdateInfo) + } + + // repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + public static final int UPDATE_INFO_FIELD_NUMBER = 1; + private java.util.List updateInfo_; + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + public java.util.List getUpdateInfoList() { + return updateInfo_; + } + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + 
*/ + public java.util.List + getUpdateInfoOrBuilderList() { + return updateInfo_; + } + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + public int getUpdateInfoCount() { + return updateInfo_.size(); + } + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo getUpdateInfo(int index) { + return updateInfo_.get(index); + } + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfoOrBuilder getUpdateInfoOrBuilder( + int index) { + return updateInfo_.get(index); + } + + private void initFields() { + updateInfo_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getUpdateInfoCount(); i++) { + if (!getUpdateInfo(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < updateInfo_.size(); i++) { + output.writeMessage(1, updateInfo_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < updateInfo_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, updateInfo_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest) obj; + + boolean result = true; + result = result && getUpdateInfoList() + .equals(other.getUpdateInfoList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getUpdateInfoCount() > 0) { + hash = (37 * hash) + UPDATE_INFO_FIELD_NUMBER; + hash = (53 * hash) + getUpdateInfoList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code UpdateFavoredNodesRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_UpdateFavoredNodesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_UpdateFavoredNodesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getUpdateInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (updateInfoBuilder_ == null) { + updateInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + updateInfoBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_UpdateFavoredNodesRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest build() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest(this); + int from_bitField0_ = bitField0_; + if (updateInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + updateInfo_ = java.util.Collections.unmodifiableList(updateInfo_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.updateInfo_ = updateInfo_; + } else { + result.updateInfo_ = updateInfoBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance()) return this; + if (updateInfoBuilder_ == null) { + if (!other.updateInfo_.isEmpty()) { + if (updateInfo_.isEmpty()) { + updateInfo_ = other.updateInfo_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureUpdateInfoIsMutable(); + updateInfo_.addAll(other.updateInfo_); + } + onChanged(); + } + } else { + if 
(!other.updateInfo_.isEmpty()) { + if (updateInfoBuilder_.isEmpty()) { + updateInfoBuilder_.dispose(); + updateInfoBuilder_ = null; + updateInfo_ = other.updateInfo_; + bitField0_ = (bitField0_ & ~0x00000001); + updateInfoBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getUpdateInfoFieldBuilder() : null; + } else { + updateInfoBuilder_.addAllMessages(other.updateInfo_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getUpdateInfoCount(); i++) { + if (!getUpdateInfo(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + private java.util.List updateInfo_ = + java.util.Collections.emptyList(); + private void ensureUpdateInfoIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + updateInfo_ = new java.util.ArrayList(updateInfo_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfoOrBuilder> updateInfoBuilder_; + + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + public java.util.List getUpdateInfoList() { + if (updateInfoBuilder_ == null) { + return java.util.Collections.unmodifiableList(updateInfo_); + } else { + return updateInfoBuilder_.getMessageList(); + } + } + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + public int getUpdateInfoCount() { + if (updateInfoBuilder_ == null) { + return updateInfo_.size(); + } else { + return updateInfoBuilder_.getCount(); + } + } + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo getUpdateInfo(int index) { + if (updateInfoBuilder_ == null) { + return updateInfo_.get(index); + } else { + return updateInfoBuilder_.getMessage(index); + } + } + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + public Builder setUpdateInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo value) { + if (updateInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureUpdateInfoIsMutable(); + updateInfo_.set(index, value); + onChanged(); + } else { + updateInfoBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + 
*/ + public Builder setUpdateInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo.Builder builderForValue) { + if (updateInfoBuilder_ == null) { + ensureUpdateInfoIsMutable(); + updateInfo_.set(index, builderForValue.build()); + onChanged(); + } else { + updateInfoBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + public Builder addUpdateInfo(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo value) { + if (updateInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureUpdateInfoIsMutable(); + updateInfo_.add(value); + onChanged(); + } else { + updateInfoBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + public Builder addUpdateInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo value) { + if (updateInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureUpdateInfoIsMutable(); + updateInfo_.add(index, value); + onChanged(); + } else { + updateInfoBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + public Builder addUpdateInfo( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo.Builder builderForValue) { + if (updateInfoBuilder_ == null) { + ensureUpdateInfoIsMutable(); + updateInfo_.add(builderForValue.build()); + onChanged(); + } else { + updateInfoBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + public Builder addUpdateInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo.Builder builderForValue) { + if (updateInfoBuilder_ == null) { + ensureUpdateInfoIsMutable(); + updateInfo_.add(index, builderForValue.build()); + onChanged(); + } else { + updateInfoBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + public Builder addAllUpdateInfo( + java.lang.Iterable values) { + if (updateInfoBuilder_ == null) { + ensureUpdateInfoIsMutable(); + super.addAll(values, updateInfo_); + onChanged(); + } else { + updateInfoBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + public Builder clearUpdateInfo() { + if (updateInfoBuilder_ == null) { + updateInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + updateInfoBuilder_.clear(); + } + return this; + } + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + public Builder removeUpdateInfo(int index) { + if (updateInfoBuilder_ == null) { + ensureUpdateInfoIsMutable(); + updateInfo_.remove(index); + onChanged(); + } else { + updateInfoBuilder_.remove(index); + } + return this; + } + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo.Builder getUpdateInfoBuilder( + int index) { + 
return getUpdateInfoFieldBuilder().getBuilder(index); + } + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfoOrBuilder getUpdateInfoOrBuilder( + int index) { + if (updateInfoBuilder_ == null) { + return updateInfo_.get(index); } else { + return updateInfoBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + public java.util.List + getUpdateInfoOrBuilderList() { + if (updateInfoBuilder_ != null) { + return updateInfoBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(updateInfo_); + } + } + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo.Builder addUpdateInfoBuilder() { + return getUpdateInfoFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo.getDefaultInstance()); + } + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo.Builder addUpdateInfoBuilder( + int index) { + return getUpdateInfoFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo.getDefaultInstance()); + } + /** + * repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1; + */ + public java.util.List + getUpdateInfoBuilderList() { + return getUpdateInfoFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfoOrBuilder> + getUpdateInfoFieldBuilder() { + if (updateInfoBuilder_ == null) { + updateInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfoOrBuilder>( + updateInfo_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + updateInfo_ = null; + } + return updateInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:UpdateFavoredNodesRequest) + } + + static { + defaultInstance = new UpdateFavoredNodesRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:UpdateFavoredNodesRequest) + } + + public interface UpdateFavoredNodesResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional uint32 response = 1; + /** + * optional uint32 response = 1; + */ + boolean hasResponse(); + /** + * optional uint32 response = 1; + */ + int getResponse(); + } + /** + * Protobuf type {@code UpdateFavoredNodesResponse} + */ + public static final class UpdateFavoredNodesResponse extends + com.google.protobuf.GeneratedMessage + implements UpdateFavoredNodesResponseOrBuilder { + // Use 
UpdateFavoredNodesResponse.newBuilder() to construct. + private UpdateFavoredNodesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private UpdateFavoredNodesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final UpdateFavoredNodesResponse defaultInstance; + public static UpdateFavoredNodesResponse getDefaultInstance() { + return defaultInstance; + } + + public UpdateFavoredNodesResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private UpdateFavoredNodesResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + response_ = input.readUInt32(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_UpdateFavoredNodesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_UpdateFavoredNodesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public UpdateFavoredNodesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new UpdateFavoredNodesResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional uint32 response = 1; + public static final int RESPONSE_FIELD_NUMBER = 1; + private int response_; + /** + * optional uint32 response = 1; + */ + public boolean hasResponse() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint32 response = 1; + */ + public int getResponse() { + return response_; + } + + private void initFields() { + response_ = 0; + } + private byte memoizedIsInitialized = -1; + public final boolean 
isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt32(1, response_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(1, response_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse) obj; + + boolean result = true; + result = result && (hasResponse() == other.hasResponse()); + if (hasResponse()) { + result = result && (getResponse() + == other.getResponse()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasResponse()) { + hash = (37 * hash) + RESPONSE_FIELD_NUMBER; + hash = (53 * hash) + getResponse(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code UpdateFavoredNodesResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_UpdateFavoredNodesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_UpdateFavoredNodesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + response_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public 
com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_UpdateFavoredNodesResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse build() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.response_ = response_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance()) return this; + if (other.hasResponse()) { + setResponse(other.getResponse()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional uint32 response = 1; + private int response_ ; + /** + * optional uint32 response = 1; + */ + public boolean hasResponse() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint32 response = 1; + */ + public int getResponse() { + return response_; + } + /** + * optional uint32 response = 1; + */ + public Builder setResponse(int value) { + bitField0_ |= 0x00000001; + response_ = value; + onChanged(); + return this; + } + /** + * optional uint32 response = 1; + */ + public Builder clearResponse() { + bitField0_ = (bitField0_ & ~0x00000001); + response_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:UpdateFavoredNodesResponse) + } + + static { + defaultInstance = new 
UpdateFavoredNodesResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:UpdateFavoredNodesResponse) + } + public interface MergeRegionsRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -17534,6 +19630,14 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc UpdateFavoredNodes(.UpdateFavoredNodesRequest) returns (.UpdateFavoredNodesResponse); + */ + public abstract void updateFavoredNodes( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -17651,6 +19755,14 @@ public final class AdminProtos { impl.stopServer(controller, request, done); } + @java.lang.Override + public void updateFavoredNodes( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest request, + com.google.protobuf.RpcCallback done) { + impl.updateFavoredNodes(controller, request, done); + } + }; } @@ -17701,6 +19813,8 @@ public final class AdminProtos { return impl.getServerInfo(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest)request); case 13: return impl.stopServer(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest)request); + case 14: + return impl.updateFavoredNodes(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -17743,6 +19857,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance(); case 13: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance(); + case 14: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -17785,6 +19901,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(); case 13: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(); + case 14: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -17905,6 +20023,14 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc UpdateFavoredNodes(.UpdateFavoredNodesRequest) returns (.UpdateFavoredNodesResponse); + */ + public abstract void updateFavoredNodes( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -17997,6 +20123,11 @@ public final class AdminProtos { com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 14: + this.updateFavoredNodes(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request, + 
com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -18039,6 +20170,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance(); case 13: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance(); + case 14: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -18081,6 +20214,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(); case 13: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(); + case 14: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -18311,6 +20446,21 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance())); } + + public void updateFavoredNodes( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(14), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.class, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -18388,6 +20538,11 @@ public final class AdminProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse updateFavoredNodes( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -18564,6 +20719,18 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse updateFavoredNodes( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(14), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:AdminService) @@ -18654,6 +20821,21 @@ public final class AdminProtos { private static 
com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_CompactRegionResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_UpdateFavoredNodesRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_UpdateFavoredNodesRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_UpdateFavoredNodesRequest_RegionUpdateInfo_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_UpdateFavoredNodesRequest_RegionUpdateInfo_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_UpdateFavoredNodesResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_UpdateFavoredNodesResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_MergeRegionsRequest_descriptor; private static @@ -18759,46 +20941,53 @@ public final class AdminProtos { "split_point\030\002 \001(\014\"\025\n\023SplitRegionResponse" + "\"W\n\024CompactRegionRequest\022 \n\006region\030\001 \002(\013" + "2\020.RegionSpecifier\022\r\n\005major\030\002 \001(\010\022\016\n\006fam" + - "ily\030\003 \001(\014\"\027\n\025CompactRegionResponse\"v\n\023Me" + - "rgeRegionsRequest\022\"\n\010region_a\030\001 \002(\0132\020.Re" + - "gionSpecifier\022\"\n\010region_b\030\002 \002(\0132\020.Region" + - "Specifier\022\027\n\010forcible\030\003 \001(\010:\005false\"\026\n\024Me", - "rgeRegionsResponse\"X\n\010WALEntry\022\024\n\003key\030\001 " + - "\002(\0132\007.WALKey\022\027\n\017key_value_bytes\030\002 \003(\014\022\035\n" + - "\025associated_cell_count\030\003 \001(\005\"4\n\030Replicat" + - "eWALEntryRequest\022\030\n\005entry\030\001 \003(\0132\t.WALEnt" + - "ry\"\033\n\031ReplicateWALEntryResponse\"\026\n\024RollW" + - "ALWriterRequest\"0\n\025RollWALWriterResponse" + - "\022\027\n\017region_to_flush\030\001 \003(\014\"#\n\021StopServerR" + - "equest\022\016\n\006reason\030\001 \002(\t\"\024\n\022StopServerResp" + - "onse\"\026\n\024GetServerInfoRequest\"B\n\nServerIn" + - "fo\022 \n\013server_name\030\001 \002(\0132\013.ServerName\022\022\n\n", - "webui_port\030\002 \001(\r\"9\n\025GetServerInfoRespons" + - "e\022 \n\013server_info\030\001 \002(\0132\013.ServerInfo2\337\006\n\014" + - "AdminService\022>\n\rGetRegionInfo\022\025.GetRegio" + - "nInfoRequest\032\026.GetRegionInfoResponse\022;\n\014" + - "GetStoreFile\022\024.GetStoreFileRequest\032\025.Get" + - "StoreFileResponse\022D\n\017GetOnlineRegion\022\027.G" + - "etOnlineRegionRequest\032\030.GetOnlineRegionR" + - "esponse\0225\n\nOpenRegion\022\022.OpenRegionReques" + - "t\032\023.OpenRegionResponse\0228\n\013CloseRegion\022\023." 
+ - "CloseRegionRequest\032\024.CloseRegionResponse", - "\0228\n\013FlushRegion\022\023.FlushRegionRequest\032\024.F" + - "lushRegionResponse\0228\n\013SplitRegion\022\023.Spli" + - "tRegionRequest\032\024.SplitRegionResponse\022>\n\r" + - "CompactRegion\022\025.CompactRegionRequest\032\026.C" + - "ompactRegionResponse\022;\n\014MergeRegions\022\024.M" + - "ergeRegionsRequest\032\025.MergeRegionsRespons" + - "e\022J\n\021ReplicateWALEntry\022\031.ReplicateWALEnt" + - "ryRequest\032\032.ReplicateWALEntryResponse\022\'\n" + - "\006Replay\022\r.MultiRequest\032\016.MultiResponse\022>" + - "\n\rRollWALWriter\022\025.RollWALWriterRequest\032\026", - ".RollWALWriterResponse\022>\n\rGetServerInfo\022" + - "\025.GetServerInfoRequest\032\026.GetServerInfoRe" + - "sponse\0225\n\nStopServer\022\022.StopServerRequest" + - "\032\023.StopServerResponseBA\n*org.apache.hado" + - "op.hbase.protobuf.generatedB\013AdminProtos" + - "H\001\210\001\001\240\001\001" + "ily\030\003 \001(\014\"\027\n\025CompactRegionResponse\"\262\001\n\031U" + + "pdateFavoredNodesRequest\022@\n\013update_info\030" + + "\001 \003(\0132+.UpdateFavoredNodesRequest.Region" + + "UpdateInfo\032S\n\020RegionUpdateInfo\022\033\n\006region", + "\030\001 \002(\0132\013.RegionInfo\022\"\n\rfavored_nodes\030\002 \003" + + "(\0132\013.ServerName\".\n\032UpdateFavoredNodesRes" + + "ponse\022\020\n\010response\030\001 \001(\r\"v\n\023MergeRegionsR" + + "equest\022\"\n\010region_a\030\001 \002(\0132\020.RegionSpecifi" + + "er\022\"\n\010region_b\030\002 \002(\0132\020.RegionSpecifier\022\027" + + "\n\010forcible\030\003 \001(\010:\005false\"\026\n\024MergeRegionsR" + + "esponse\"X\n\010WALEntry\022\024\n\003key\030\001 \002(\0132\007.WALKe" + + "y\022\027\n\017key_value_bytes\030\002 \003(\014\022\035\n\025associated" + + "_cell_count\030\003 \001(\005\"4\n\030ReplicateWALEntryRe" + + "quest\022\030\n\005entry\030\001 \003(\0132\t.WALEntry\"\033\n\031Repli", + "cateWALEntryResponse\"\026\n\024RollWALWriterReq" + + "uest\"0\n\025RollWALWriterResponse\022\027\n\017region_" + + "to_flush\030\001 \003(\014\"#\n\021StopServerRequest\022\016\n\006r" + + "eason\030\001 \002(\t\"\024\n\022StopServerResponse\"\026\n\024Get" + + "ServerInfoRequest\"B\n\nServerInfo\022 \n\013serve" + + "r_name\030\001 \002(\0132\013.ServerName\022\022\n\nwebui_port\030" + + "\002 \001(\r\"9\n\025GetServerInfoResponse\022 \n\013server" + + "_info\030\001 \002(\0132\013.ServerInfo2\256\007\n\014AdminServic" + + "e\022>\n\rGetRegionInfo\022\025.GetRegionInfoReques" + + "t\032\026.GetRegionInfoResponse\022;\n\014GetStoreFil", + "e\022\024.GetStoreFileRequest\032\025.GetStoreFileRe" + + "sponse\022D\n\017GetOnlineRegion\022\027.GetOnlineReg" + + "ionRequest\032\030.GetOnlineRegionResponse\0225\n\n" + + "OpenRegion\022\022.OpenRegionRequest\032\023.OpenReg" + + "ionResponse\0228\n\013CloseRegion\022\023.CloseRegion" + + "Request\032\024.CloseRegionResponse\0228\n\013FlushRe" + + "gion\022\023.FlushRegionRequest\032\024.FlushRegionR" + + "esponse\0228\n\013SplitRegion\022\023.SplitRegionRequ" + + "est\032\024.SplitRegionResponse\022>\n\rCompactRegi" + + "on\022\025.CompactRegionRequest\032\026.CompactRegio", + "nResponse\022;\n\014MergeRegions\022\024.MergeRegions" + + "Request\032\025.MergeRegionsResponse\022J\n\021Replic" + + "ateWALEntry\022\031.ReplicateWALEntryRequest\032\032" + + ".ReplicateWALEntryResponse\022\'\n\006Replay\022\r.M" + + "ultiRequest\032\016.MultiResponse\022>\n\rRollWALWr" + + "iter\022\025.RollWALWriterRequest\032\026.RollWALWri" + + 
"terResponse\022>\n\rGetServerInfo\022\025.GetServer" + + "InfoRequest\032\026.GetServerInfoResponse\0225\n\nS" + + "topServer\022\022.StopServerRequest\032\023.StopServ" + + "erResponse\022M\n\022UpdateFavoredNodes\022\032.Updat", + "eFavoredNodesRequest\032\033.UpdateFavoredNode" + + "sResponseBA\n*org.apache.hadoop.hbase.pro" + + "tobuf.generatedB\013AdminProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -18907,74 +21096,92 @@ public final class AdminProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_CompactRegionResponse_descriptor, new java.lang.String[] { }); - internal_static_MergeRegionsRequest_descriptor = + internal_static_UpdateFavoredNodesRequest_descriptor = getDescriptor().getMessageTypes().get(16); + internal_static_UpdateFavoredNodesRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_UpdateFavoredNodesRequest_descriptor, + new java.lang.String[] { "UpdateInfo", }); + internal_static_UpdateFavoredNodesRequest_RegionUpdateInfo_descriptor = + internal_static_UpdateFavoredNodesRequest_descriptor.getNestedTypes().get(0); + internal_static_UpdateFavoredNodesRequest_RegionUpdateInfo_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_UpdateFavoredNodesRequest_RegionUpdateInfo_descriptor, + new java.lang.String[] { "Region", "FavoredNodes", }); + internal_static_UpdateFavoredNodesResponse_descriptor = + getDescriptor().getMessageTypes().get(17); + internal_static_UpdateFavoredNodesResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_UpdateFavoredNodesResponse_descriptor, + new java.lang.String[] { "Response", }); + internal_static_MergeRegionsRequest_descriptor = + getDescriptor().getMessageTypes().get(18); internal_static_MergeRegionsRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MergeRegionsRequest_descriptor, new java.lang.String[] { "RegionA", "RegionB", "Forcible", }); internal_static_MergeRegionsResponse_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(19); internal_static_MergeRegionsResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MergeRegionsResponse_descriptor, new java.lang.String[] { }); internal_static_WALEntry_descriptor = - getDescriptor().getMessageTypes().get(18); + getDescriptor().getMessageTypes().get(20); internal_static_WALEntry_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_WALEntry_descriptor, new java.lang.String[] { "Key", "KeyValueBytes", "AssociatedCellCount", }); internal_static_ReplicateWALEntryRequest_descriptor = - getDescriptor().getMessageTypes().get(19); + getDescriptor().getMessageTypes().get(21); internal_static_ReplicateWALEntryRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ReplicateWALEntryRequest_descriptor, new java.lang.String[] { "Entry", }); internal_static_ReplicateWALEntryResponse_descriptor = - getDescriptor().getMessageTypes().get(20); + getDescriptor().getMessageTypes().get(22); internal_static_ReplicateWALEntryResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( 
internal_static_ReplicateWALEntryResponse_descriptor, new java.lang.String[] { }); internal_static_RollWALWriterRequest_descriptor = - getDescriptor().getMessageTypes().get(21); + getDescriptor().getMessageTypes().get(23); internal_static_RollWALWriterRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RollWALWriterRequest_descriptor, new java.lang.String[] { }); internal_static_RollWALWriterResponse_descriptor = - getDescriptor().getMessageTypes().get(22); + getDescriptor().getMessageTypes().get(24); internal_static_RollWALWriterResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RollWALWriterResponse_descriptor, new java.lang.String[] { "RegionToFlush", }); internal_static_StopServerRequest_descriptor = - getDescriptor().getMessageTypes().get(23); + getDescriptor().getMessageTypes().get(25); internal_static_StopServerRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_StopServerRequest_descriptor, new java.lang.String[] { "Reason", }); internal_static_StopServerResponse_descriptor = - getDescriptor().getMessageTypes().get(24); + getDescriptor().getMessageTypes().get(26); internal_static_StopServerResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_StopServerResponse_descriptor, new java.lang.String[] { }); internal_static_GetServerInfoRequest_descriptor = - getDescriptor().getMessageTypes().get(25); + getDescriptor().getMessageTypes().get(27); internal_static_GetServerInfoRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetServerInfoRequest_descriptor, new java.lang.String[] { }); internal_static_ServerInfo_descriptor = - getDescriptor().getMessageTypes().get(26); + getDescriptor().getMessageTypes().get(28); internal_static_ServerInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ServerInfo_descriptor, new java.lang.String[] { "ServerName", "WebuiPort", }); internal_static_GetServerInfoResponse_descriptor = - getDescriptor().getMessageTypes().get(27); + getDescriptor().getMessageTypes().get(29); internal_static_GetServerInfoResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetServerInfoResponse_descriptor, diff --git a/hbase-protocol/src/main/protobuf/Admin.proto b/hbase-protocol/src/main/protobuf/Admin.proto index 73113141ead..1d0fdbd4426 100644 --- a/hbase-protocol/src/main/protobuf/Admin.proto +++ b/hbase-protocol/src/main/protobuf/Admin.proto @@ -145,6 +145,19 @@ message CompactRegionRequest { message CompactRegionResponse { } +message UpdateFavoredNodesRequest { + repeated RegionUpdateInfo update_info = 1; + + message RegionUpdateInfo { + required RegionInfo region = 1; + repeated ServerName favored_nodes = 2; + } +} + +message UpdateFavoredNodesResponse { + optional uint32 response = 1; +} + /** * Merges the specified regions. *

@@ -251,4 +264,7 @@ service AdminService { rpc StopServer(StopServerRequest) returns(StopServerResponse); + + rpc UpdateFavoredNodes(UpdateFavoredNodesRequest) + returns(UpdateFavoredNodesResponse); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index 2df618558ef..d9ead3ba675 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -68,6 +68,7 @@ import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper; import org.apache.hadoop.hbase.master.balancer.FavoredNodeLoadBalancer; +import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan; import org.apache.hadoop.hbase.master.handler.ClosedRegionHandler; import org.apache.hadoop.hbase.master.handler.DisableTableHandler; import org.apache.hadoop.hbase.master.handler.EnableTableHandler; @@ -126,6 +127,8 @@ public class AssignmentManager extends ZooKeeperListener { private final TableLockManager tableLockManager; + private AtomicInteger numRegionsOpened = new AtomicInteger(0); + final private KeyLocker locker = new KeyLocker(); /** @@ -1366,7 +1369,7 @@ public class AssignmentManager extends ZooKeeperListener { LOG.warn("A region was opened on a dead server, ServerName=" + sn + ", region=" + regionInfo.getEncodedName()); } - + numRegionsOpened.incrementAndGet(); regionStates.regionOnline(regionInfo, sn); // Remove plan if one. @@ -2411,6 +2414,15 @@ public class AssignmentManager extends ZooKeeperListener { return result; } + /** + * Used by unit tests. Return the number of regions opened so far in the life + * of the master. Increases by one every time the master opens a region + * @return the counter value of the number of regions opened so far + */ + public int getNumRegionsOpened() { + return numRegionsOpened.get(); + } + /** * Waits until the specified region has completed assignment. *
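For rough illustration only (this sketch is not part of the patch), the snippet below shows how the new UpdateFavoredNodesRequest/UpdateFavoredNodesResponse messages and the UpdateFavoredNodes RPC declared above might be exercised from a client. It assumes the protobuf RegionInfo and ServerName values and the AdminService blocking stub are obtained from an existing connection; the class name UpdateFavoredNodesExample and the method sendUpdate are purely illustrative.

    // Illustrative sketch: assemble an UpdateFavoredNodesRequest with the generated
    // builders and invoke the new AdminService RPC through the blocking interface.
    import java.util.List;

    import com.google.protobuf.ServiceException;
    import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
    import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
    import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo;
    import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

    public final class UpdateFavoredNodesExample {
      static UpdateFavoredNodesResponse sendUpdate(
          AdminProtos.AdminService.BlockingInterface admin,
          HBaseProtos.RegionInfo region,
          List<HBaseProtos.ServerName> favoredNodes) throws ServiceException {
        // One RegionUpdateInfo per region: the region plus its favored-node list,
        // matching the nested message defined in Admin.proto above.
        RegionUpdateInfo info = RegionUpdateInfo.newBuilder()
            .setRegion(region)
            .addAllFavoredNodes(favoredNodes)
            .build();
        // The request body is just the repeated update_info field.
        UpdateFavoredNodesRequest request =
            UpdateFavoredNodesRequest.newBuilder().addUpdateInfo(info).build();
        // Call the new RPC; a null controller is assumed acceptable for this stub.
        return admin.updateFavoredNodes(null, request);
      }
    }

The response message carries a single optional uint32 field, as defined above; its interpretation is left to the server-side handler added elsewhere in this patch.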

@@ -2558,14 +2570,10 @@ public class AssignmentManager extends ZooKeeperListener { disabledOrDisablingOrEnabling.addAll(ZKTable.getEnablingTables(watcher)); // Scan META for all user regions, skipping any disabled tables Map allRegions; - if (this.shouldAssignRegionsWithFavoredNodes) { - allRegions = FavoredNodeAssignmentHelper.fullScan( - catalogTracker, disabledOrDisablingOrEnabling, true, (FavoredNodeLoadBalancer)balancer); - } else { - allRegions = MetaReader.fullScan( - catalogTracker, disabledOrDisablingOrEnabling, true); - } - + SnapshotOfRegionAssignmentFromMeta snapshotOfRegionAssignment = + new SnapshotOfRegionAssignmentFromMeta(catalogTracker, disabledOrDisablingOrEnabling, true); + snapshotOfRegionAssignment.initialize(); + allRegions = snapshotOfRegionAssignment.getRegionToRegionServerMap(); if (allRegions == null) return; //remove system tables because they would have been assigned earlier diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java new file mode 100644 index 00000000000..b84014ac618 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java @@ -0,0 +1,598 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master; + +import java.text.DecimalFormat; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper; +import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan; +/** + * Helper class that is used by {@link RegionPlacementMaintainer} to print + * information for favored nodes + * + */ +@InterfaceAudience.Private +public class AssignmentVerificationReport { + protected static final Log LOG = LogFactory.getLog( + AssignmentVerificationReport.class.getName()); + + private TableName tableName = null; + private boolean enforceLocality = false; + private boolean isFilledUp = false; + + private int totalRegions = 0; + private int totalRegionServers = 0; + // for unassigned regions + private List unAssignedRegionsList = + new ArrayList(); + + // For regions without valid favored nodes + private List regionsWithoutValidFavoredNodes = + new ArrayList(); + + // For regions not running on the favored nodes + private List nonFavoredAssignedRegionList = + new ArrayList(); + + // For regions running on the favored nodes + private int totalFavoredAssignments = 0; + private int[] favoredNodes = new int[FavoredNodeAssignmentHelper.FAVORED_NODES_NUM]; + private float[] favoredNodesLocalitySummary = + new float[FavoredNodeAssignmentHelper.FAVORED_NODES_NUM]; + private float actualLocalitySummary = 0; + + // For region balancing information + private float avgRegionsOnRS = 0; + private int maxRegionsOnRS = 0; + private int minRegionsOnRS = Integer.MAX_VALUE; + private Set mostLoadedRSSet = + new HashSet(); + private Set leastLoadedRSSet = + new HashSet(); + + private float avgDispersionScore = 0; + private float maxDispersionScore = 0; + private Set maxDispersionScoreServerSet = + new HashSet(); + private float minDispersionScore = Float.MAX_VALUE; + private Set minDispersionScoreServerSet = + new HashSet(); + + private float avgDispersionNum = 0; + private float maxDispersionNum = 0; + private Set maxDispersionNumServerSet = + new HashSet(); + private float minDispersionNum = Float.MAX_VALUE; + private Set minDispersionNumServerSet = + new HashSet(); + + public void fillUp(TableName tableName, SnapshotOfRegionAssignmentFromMeta snapshot, + Map> regionLocalityMap) { + // Set the table name + this.tableName = tableName; + + // Get all the regions for this table + List regionInfoList = + snapshot.getTableToRegionMap().get(tableName); + // Get the total region num for the current table + this.totalRegions = regionInfoList.size(); + + // Get the existing assignment plan + FavoredNodesPlan favoredNodesAssignment = snapshot.getExistingAssignmentPlan(); + // Get the region to region server mapping + Map currentAssignment = + snapshot.getRegionToRegionServerMap(); + // Initialize the server to its hosing region counter map + Map serverToHostingRegionCounterMap = + new HashMap(); + + Map primaryRSToRegionCounterMap = + new HashMap(); + Map> primaryToSecTerRSMap = + new HashMap>(); + + // Check the favored nodes and its locality information + // Also keep tracker of the most loaded and least loaded region servers + for (HRegionInfo 
region : regionInfoList) { + try { + ServerName currentRS = currentAssignment.get(region); + // Handle unassigned regions + if (currentRS == null) { + unAssignedRegionsList.add(region); + continue; + } + + // Keep updating the server to is hosting region counter map + Integer hostRegionCounter = serverToHostingRegionCounterMap.get(currentRS); + if (hostRegionCounter == null) { + hostRegionCounter = Integer.valueOf(0); + } + hostRegionCounter = hostRegionCounter.intValue() + 1; + serverToHostingRegionCounterMap.put(currentRS, hostRegionCounter); + + // Get the favored nodes from the assignment plan and verify it. + List favoredNodes = favoredNodesAssignment.getFavoredNodes(region); + if (favoredNodes == null || + favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) { + regionsWithoutValidFavoredNodes.add(region); + continue; + } + // Get the primary, secondary and tertiary region server + ServerName primaryRS = + favoredNodes.get(FavoredNodesPlan.Position.PRIMARY.ordinal()); + ServerName secondaryRS = + favoredNodes.get(FavoredNodesPlan.Position.SECONDARY.ordinal()); + ServerName tertiaryRS = + favoredNodes.get(FavoredNodesPlan.Position.TERTIARY.ordinal()); + + // Update the primary rs to its region set map + Integer regionCounter = primaryRSToRegionCounterMap.get(primaryRS); + if (regionCounter == null) { + regionCounter = Integer.valueOf(0); + } + regionCounter = regionCounter.intValue() + 1; + primaryRSToRegionCounterMap.put(primaryRS, regionCounter); + + // Update the primary rs to secondary and tertiary rs map + Set secAndTerSet = primaryToSecTerRSMap.get(primaryRS); + if (secAndTerSet == null) { + secAndTerSet = new HashSet(); + } + secAndTerSet.add(secondaryRS); + secAndTerSet.add(tertiaryRS); + primaryToSecTerRSMap.put(primaryRS, secAndTerSet); + + // Get the position of the current region server in the favored nodes list + FavoredNodesPlan.Position favoredNodePosition = + FavoredNodesPlan.getFavoredServerPosition(favoredNodes, currentRS); + + // Handle the non favored assignment. + if (favoredNodePosition == null) { + nonFavoredAssignedRegionList.add(region); + continue; + } + // Increase the favored nodes assignment. + this.favoredNodes[favoredNodePosition.ordinal()]++; + totalFavoredAssignments++; + + // Summary the locality information for each favored nodes + if (regionLocalityMap != null) { + // Set the enforce locality as true; + this.enforceLocality = true; + + // Get the region degree locality map + Map regionDegreeLocalityMap = + regionLocalityMap.get(region.getEncodedName()); + if (regionDegreeLocalityMap == null) { + continue; // ignore the region which doesn't have any store files. + } + + // Get the locality summary for each favored nodes + for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) { + ServerName favoredNode = favoredNodes.get(p.ordinal()); + // Get the locality for the current favored nodes + Float locality = + regionDegreeLocalityMap.get(favoredNode.getHostname()); + if (locality != null) { + this.favoredNodesLocalitySummary[p.ordinal()] += locality; + } + } + + // Get the locality summary for the current region server + Float actualLocality = + regionDegreeLocalityMap.get(currentRS.getHostname()); + if (actualLocality != null) { + this.actualLocalitySummary += actualLocality; + } + } + } catch (Exception e) { + LOG.error("Cannot verify the region assignment for region " + + ((region == null) ? 
" null " : region.getRegionNameAsString()) + + "because of " + e); + } + } + + float dispersionScoreSummary = 0; + float dispersionNumSummary = 0; + // Calculate the secondary score for each primary region server + for (Map.Entry entry : + primaryRSToRegionCounterMap.entrySet()) { + ServerName primaryRS = entry.getKey(); + Integer regionsOnPrimary = entry.getValue(); + + // Process the dispersion number and score + float dispersionScore = 0; + int dispersionNum = 0; + if (primaryToSecTerRSMap.get(primaryRS) != null + && regionsOnPrimary.intValue() != 0) { + dispersionNum = primaryToSecTerRSMap.get(primaryRS).size(); + dispersionScore = dispersionNum / + ((float) regionsOnPrimary.intValue() * 2); + } + // Update the max dispersion score + if (dispersionScore > this.maxDispersionScore) { + this.maxDispersionScoreServerSet.clear(); + this.maxDispersionScoreServerSet.add(primaryRS); + this.maxDispersionScore = dispersionScore; + } else if (dispersionScore == this.maxDispersionScore) { + this.maxDispersionScoreServerSet.add(primaryRS); + } + + // Update the max dispersion num + if (dispersionNum > this.maxDispersionNum) { + this.maxDispersionNumServerSet.clear(); + this.maxDispersionNumServerSet.add(primaryRS); + this.maxDispersionNum = dispersionNum; + } else if (dispersionNum == this.maxDispersionNum) { + this.maxDispersionNumServerSet.add(primaryRS); + } + + // Update the min dispersion score + if (dispersionScore < this.minDispersionScore) { + this.minDispersionScoreServerSet.clear(); + this.minDispersionScoreServerSet.add(primaryRS); + this.minDispersionScore = dispersionScore; + } else if (dispersionScore == this.minDispersionScore) { + this.minDispersionScoreServerSet.add(primaryRS); + } + + // Update the min dispersion num + if (dispersionNum < this.minDispersionNum) { + this.minDispersionNumServerSet.clear(); + this.minDispersionNumServerSet.add(primaryRS); + this.minDispersionNum = dispersionNum; + } else if (dispersionNum == this.minDispersionNum) { + this.minDispersionNumServerSet.add(primaryRS); + } + + dispersionScoreSummary += dispersionScore; + dispersionNumSummary += dispersionNum; + } + + // Update the avg dispersion score + if (primaryRSToRegionCounterMap.keySet().size() != 0) { + this.avgDispersionScore = dispersionScoreSummary / + (float) primaryRSToRegionCounterMap.keySet().size(); + this.avgDispersionNum = dispersionNumSummary / + (float) primaryRSToRegionCounterMap.keySet().size(); + } + + // Fill up the most loaded and least loaded region server information + for (Map.Entry entry : + serverToHostingRegionCounterMap.entrySet()) { + ServerName currentRS = entry.getKey(); + int hostRegionCounter = entry.getValue().intValue(); + + // Update the most loaded region server list and maxRegionsOnRS + if (hostRegionCounter > this.maxRegionsOnRS) { + maxRegionsOnRS = hostRegionCounter; + this.mostLoadedRSSet.clear(); + this.mostLoadedRSSet.add(currentRS); + } else if (hostRegionCounter == this.maxRegionsOnRS) { + this.mostLoadedRSSet.add(currentRS); + } + + // Update the least loaded region server list and minRegionsOnRS + if (hostRegionCounter < this.minRegionsOnRS) { + this.minRegionsOnRS = hostRegionCounter; + this.leastLoadedRSSet.clear(); + this.leastLoadedRSSet.add(currentRS); + } else if (hostRegionCounter == this.minRegionsOnRS) { + this.leastLoadedRSSet.add(currentRS); + } + } + + // and total region servers + this.totalRegionServers = serverToHostingRegionCounterMap.keySet().size(); + this.avgRegionsOnRS = (totalRegionServers == 0) ? 
0 : + (totalRegions / (float) totalRegionServers); + // Set the isFilledUp as true + isFilledUp = true; + } + + /** + * Use this to project the dispersion scores + * @param tableName + * @param snapshot + * @param newPlan + */ + public void fillUpDispersion(TableName tableName, + SnapshotOfRegionAssignmentFromMeta snapshot, FavoredNodesPlan newPlan) { + // Set the table name + this.tableName = tableName; + // Get all the regions for this table + List regionInfoList = snapshot.getTableToRegionMap().get( + tableName); + // Get the total region num for the current table + this.totalRegions = regionInfoList.size(); + FavoredNodesPlan plan = null; + if (newPlan == null) { + plan = snapshot.getExistingAssignmentPlan(); + } else { + plan = newPlan; + } + // Get the region to region server mapping + Map primaryRSToRegionCounterMap = + new HashMap(); + Map> primaryToSecTerRSMap = + new HashMap>(); + + // Check the favored nodes and its locality information + // Also keep tracker of the most loaded and least loaded region servers + for (HRegionInfo region : regionInfoList) { + try { + // Get the favored nodes from the assignment plan and verify it. + List favoredNodes = plan.getFavoredNodes(region); + if (favoredNodes == null + || favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) { + regionsWithoutValidFavoredNodes.add(region); + continue; + } + // Get the primary, secondary and tertiary region server + ServerName primaryRS = favoredNodes + .get(FavoredNodesPlan.Position.PRIMARY.ordinal()); + ServerName secondaryRS = favoredNodes + .get(FavoredNodesPlan.Position.SECONDARY.ordinal()); + ServerName tertiaryRS = favoredNodes + .get(FavoredNodesPlan.Position.TERTIARY.ordinal()); + + // Update the primary rs to its region set map + Integer regionCounter = primaryRSToRegionCounterMap.get(primaryRS); + if (regionCounter == null) { + regionCounter = Integer.valueOf(0); + } + regionCounter = regionCounter.intValue() + 1; + primaryRSToRegionCounterMap.put(primaryRS, regionCounter); + + // Update the primary rs to secondary and tertiary rs map + Set secAndTerSet = primaryToSecTerRSMap.get(primaryRS); + if (secAndTerSet == null) { + secAndTerSet = new HashSet(); + } + secAndTerSet.add(secondaryRS); + secAndTerSet.add(tertiaryRS); + primaryToSecTerRSMap.put(primaryRS, secAndTerSet); + } catch (Exception e) { + LOG.error("Cannot verify the region assignment for region " + + ((region == null) ? 
" null " : region.getRegionNameAsString()) + + "because of " + e); + } + } + float dispersionScoreSummary = 0; + float dispersionNumSummary = 0; + // Calculate the secondary score for each primary region server + for (Map.Entry entry : + primaryRSToRegionCounterMap.entrySet()) { + ServerName primaryRS = entry.getKey(); + Integer regionsOnPrimary = entry.getValue(); + + // Process the dispersion number and score + float dispersionScore = 0; + int dispersionNum = 0; + if (primaryToSecTerRSMap.get(primaryRS) != null + && regionsOnPrimary.intValue() != 0) { + dispersionNum = primaryToSecTerRSMap.get(primaryRS).size(); + dispersionScore = dispersionNum / + ((float) regionsOnPrimary.intValue() * 2); + } + + // Update the max dispersion num + if (dispersionNum > this.maxDispersionNum) { + this.maxDispersionNumServerSet.clear(); + this.maxDispersionNumServerSet.add(primaryRS); + this.maxDispersionNum = dispersionNum; + } else if (dispersionNum == this.maxDispersionNum) { + this.maxDispersionNumServerSet.add(primaryRS); + } + + // Update the min dispersion score + if (dispersionScore < this.minDispersionScore) { + this.minDispersionScoreServerSet.clear(); + this.minDispersionScoreServerSet.add(primaryRS); + this.minDispersionScore = dispersionScore; + } else if (dispersionScore == this.minDispersionScore) { + this.minDispersionScoreServerSet.add(primaryRS); + } + + // Update the min dispersion num + if (dispersionNum < this.minDispersionNum) { + this.minDispersionNumServerSet.clear(); + this.minDispersionNumServerSet.add(primaryRS); + this.minDispersionNum = dispersionNum; + } else if (dispersionNum == this.minDispersionNum) { + this.minDispersionNumServerSet.add(primaryRS); + } + + dispersionScoreSummary += dispersionScore; + dispersionNumSummary += dispersionNum; + } + + // Update the avg dispersion score + if (primaryRSToRegionCounterMap.keySet().size() != 0) { + this.avgDispersionScore = dispersionScoreSummary / + (float) primaryRSToRegionCounterMap.keySet().size(); + this.avgDispersionNum = dispersionNumSummary / + (float) primaryRSToRegionCounterMap.keySet().size(); + } + } + + /** + * @return list which contains just 3 elements: average dispersion score, max + * dispersion score and min dispersion score as first, second and third element + * respectively. 
+ *
+ */
+ public List getDispersionInformation() {
+   List dispersion = new ArrayList();
+   dispersion.add(avgDispersionScore);
+   dispersion.add(maxDispersionScore);
+   dispersion.add(minDispersionScore);
+   return dispersion;
+ }
+
+ public void print(boolean isDetailMode) {
+   if (!isFilledUp) {
+     System.err.println("[Error] Region assignment verification report" +
+       " hasn't been filled up");
+   }
+   DecimalFormat df = new java.text.DecimalFormat("#.##");
+
+   // Print some basic information
+   System.out.println("Region Assignment Verification for Table: " + tableName +
+     "\n\tTotal regions: " + totalRegions);
+
+   // Print the number of regions on each kind of favored node
+   System.out.println("\tTotal regions on favored nodes " +
+     totalFavoredAssignments);
+   for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) {
+     System.out.println("\t\tTotal regions on " + p.toString() +
+       " region servers: " + favoredNodes[p.ordinal()]);
+   }
+   // Print the number of regions in each kind of invalid assignment
+   System.out.println("\tTotal unassigned regions: " +
+     unAssignedRegionsList.size());
+   if (isDetailMode) {
+     for (HRegionInfo region : unAssignedRegionsList) {
+       System.out.println("\t\t" + region.getRegionNameAsString());
+     }
+   }
+
+   System.out.println("\tTotal regions NOT on favored nodes: " +
+     nonFavoredAssignedRegionList.size());
+   if (isDetailMode) {
+     for (HRegionInfo region : nonFavoredAssignedRegionList) {
+       System.out.println("\t\t" + region.getRegionNameAsString());
+     }
+   }
+
+   System.out.println("\tTotal regions without favored nodes: " +
+     regionsWithoutValidFavoredNodes.size());
+   if (isDetailMode) {
+     for (HRegionInfo region : regionsWithoutValidFavoredNodes) {
+       System.out.println("\t\t" + region.getRegionNameAsString());
+     }
+   }
+
+   // Print the locality information if enabled
+   if (this.enforceLocality && totalRegions != 0) {
+     // Print the actual locality for this table
+     float actualLocality = 100 *
+       this.actualLocalitySummary / (float) totalRegions;
+     System.out.println("\n\tThe actual avg locality is " +
+       df.format(actualLocality) + " %");
+
+     // Print the expected locality if regions are placed on each kind of
+     // favored node
+     for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) {
+       float avgLocality = 100 *
+         (favoredNodesLocalitySummary[p.ordinal()] / (float) totalRegions);
+       System.out.println("\t\tThe expected avg locality if all regions" +
+         " on the " + p.toString() + " region servers: " +
+         df.format(avgLocality) + " %");
+     }
+   }
+
+   // Print the region balancing information
+   System.out.println("\n\tTotal hosting region servers: " +
+     totalRegionServers);
+   // Print the region balance information
+   if (totalRegionServers != 0) {
+     System.out.println(
+       "\tAvg dispersion num: " + df.format(avgDispersionNum) +
+       " hosts;\tMax dispersion num: " + df.format(maxDispersionNum) +
+       " hosts;\tMin dispersion num: " + df.format(minDispersionNum) +
+       " hosts;");
+
+     System.out.println("\t\tThe number of the region servers with the max" +
+       " dispersion num: " + this.maxDispersionNumServerSet.size());
+     if (isDetailMode) {
+       printHServerAddressSet(maxDispersionNumServerSet);
+     }
+
+     System.out.println("\t\tThe number of the region servers with the min" +
+       " dispersion num: " + this.minDispersionNumServerSet.size());
+     if (isDetailMode) {
+       printHServerAddressSet(minDispersionNumServerSet);
+     }
+
+     System.out.println(
+       "\tAvg dispersion score: " + df.format(avgDispersionScore) +
+       ";\tMax dispersion score: "
df.format(maxDispersionScore) + + ";\tMin dispersion score: " + df.format(minDispersionScore) + ";"); + + System.out.println("\t\tThe number of the region servers with the max" + + " dispersion score: " + this.maxDispersionScoreServerSet.size()); + if (isDetailMode) { + printHServerAddressSet(maxDispersionScoreServerSet); + } + + System.out.println("\t\tThe number of the region servers with the min" + + " dispersion score: " + this.minDispersionScoreServerSet.size()); + if (isDetailMode) { + printHServerAddressSet(minDispersionScoreServerSet); + } + + System.out.println( + "\tAvg regions/region server: " + df.format(avgRegionsOnRS) + + ";\tMax regions/region server: " + maxRegionsOnRS + + ";\tMin regions/region server: " + minRegionsOnRS + ";"); + + // Print the details about the most loaded region servers + System.out.println("\t\tThe number of the most loaded region servers: " + + mostLoadedRSSet.size()); + if (isDetailMode) { + printHServerAddressSet(mostLoadedRSSet); + } + + // Print the details about the least loaded region servers + System.out.println("\t\tThe number of the least loaded region servers: " + + leastLoadedRSSet.size()); + if (isDetailMode) { + printHServerAddressSet(leastLoadedRSSet); + } + } + System.out.println("=============================="); + } + + private void printHServerAddressSet(Set serverSet) { + if (serverSet == null) { + return ; + } + int i = 0; + for (ServerName addr : serverSet){ + if ((i++) % 3 == 0) { + System.out.print("\n\t\t\t"); + } + System.out.print(addr.getHostAndPort() + " ; "); + } + System.out.println("\n"); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java new file mode 100644 index 00000000000..9e42ffea496 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java @@ -0,0 +1,1114 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master; + +import java.io.IOException; +import java.text.DecimalFormat; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Scanner; +import java.util.Set; +import java.util.TreeMap; + +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.GnuParser; +import org.apache.commons.cli.HelpFormatter; +import org.apache.commons.cli.Options; +import org.apache.commons.cli.ParseException; +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.catalog.CatalogTracker; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper; +import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.RequestConverter; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.util.MunkresAssignment; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; + +/** + * A tool that is used for manipulating and viewing favored nodes information + * for regions. Run with -h to get a list of the options + * + */ +@InterfaceAudience.Private +public class RegionPlacementMaintainer { + private static final Log LOG = LogFactory.getLog(RegionPlacementMaintainer.class + .getName()); + //The cost of a placement that should never be assigned. + private static final float MAX_COST = Float.POSITIVE_INFINITY; + + // The cost of a placement that is undesirable but acceptable. + private static final float AVOID_COST = 100000f; + + // The amount by which the cost of a placement is increased if it is the + // last slot of the server. This is done to more evenly distribute the slop + // amongst servers. + private static final float LAST_SLOT_COST_PENALTY = 0.5f; + + // The amount by which the cost of a primary placement is penalized if it is + // not the host currently serving the region. This is done to minimize moves. 
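+ //
+ // Roughly, these four cost constants are tiered: MAX_COST (infinity) marks
+ // slots that must never be used, AVOID_COST dwarfs any locality-derived cost
+ // so such slots are picked only when nothing cheaper remains, and the
+ // LAST_SLOT / NOT_CURRENT_HOST penalties are small adjustments that bias the
+ // choice between otherwise comparable locality-based costs.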
+ private static final float NOT_CURRENT_HOST_PENALTY = 0.1f; + + private static boolean USE_MUNKRES_FOR_PLACING_SECONDARY_AND_TERTIARY = false; + + private Configuration conf; + private final boolean enforceLocality; + private final boolean enforceMinAssignmentMove; + private HBaseAdmin admin; + private RackManager rackManager; + private Set targetTableSet; + + public RegionPlacementMaintainer(Configuration conf) { + this(conf, true, true); + } + + public RegionPlacementMaintainer(Configuration conf, boolean enforceLocality, + boolean enforceMinAssignmentMove) { + this.conf = conf; + this.enforceLocality = enforceLocality; + this.enforceMinAssignmentMove = enforceMinAssignmentMove; + this.targetTableSet = new HashSet(); + this.rackManager = new RackManager(conf); + } + private static void printHelp(Options opt) { + new HelpFormatter().printHelp( + "RegionPlacement < -w | -u | -n | -v | -t | -h | -overwrite -r regionName -f favoredNodes " + + "-diff>" + + " [-l false] [-m false] [-d] [-tables t1,t2,...tn] [-zk zk1,zk2,zk3]" + + " [-fs hdfs://a.b.c.d:9000] [-hbase_root /HBASE]", opt); + } + + public void setTargetTableName(String[] tableNames) { + if (tableNames != null) { + for (String table : tableNames) + this.targetTableSet.add(TableName.valueOf(table)); + } + } + + /** + * @return the cached HBaseAdmin + * @throws IOException + */ + private HBaseAdmin getHBaseAdmin() throws IOException { + if (this.admin == null) { + this.admin = new HBaseAdmin(this.conf); + } + return this.admin; + } + + /** + * @return the new RegionAssignmentSnapshot + * @throws IOException + */ + public SnapshotOfRegionAssignmentFromMeta getRegionAssignmentSnapshot() + throws IOException { + SnapshotOfRegionAssignmentFromMeta currentAssignmentShapshot = + new SnapshotOfRegionAssignmentFromMeta(new CatalogTracker(this.conf)); + currentAssignmentShapshot.initialize(); + return currentAssignmentShapshot; + } + + /** + * Verify the region placement is consistent with the assignment plan; + * @throws IOException + */ + public void verifyRegionPlacement(boolean isDetailMode) throws IOException { + System.out.println("Start to verify the region assignment and " + + "generate the verification report"); + // Get the region assignment snapshot + SnapshotOfRegionAssignmentFromMeta snapshot = this.getRegionAssignmentSnapshot(); + + // Get all the tables + Set tables = snapshot.getTableSet(); + + // Get the region locality map + Map> regionLocalityMap = null; + if (this.enforceLocality == true) { + regionLocalityMap = FSUtils.getRegionDegreeLocalityMappingFromFS(conf); + } + // Iterate all the tables to fill up the verification report + for (TableName table : tables) { + if (!this.targetTableSet.isEmpty() && + !this.targetTableSet.contains(table)) { + continue; + } + AssignmentVerificationReport report = new AssignmentVerificationReport(); + report.fillUp(table, snapshot, regionLocalityMap); + report.print(isDetailMode); + } + } + + /** + * Generate the assignment plan for the existing table + * + * @param tableName + * @param assignmentSnapshot + * @param regionLocalityMap + * @param plan + * @param munkresForSecondaryAndTertiary if set on true the assignment plan + * for the tertiary and secondary will be generated with Munkres algorithm, + * otherwise will be generated using placeSecondaryAndTertiaryRS + * @throws IOException + */ + private void genAssignmentPlan(TableName tableName, + SnapshotOfRegionAssignmentFromMeta assignmentSnapshot, + Map> regionLocalityMap, FavoredNodesPlan plan, + boolean 
munkresForSecondaryAndTertiary) throws IOException { + // Get the all the regions for the current table + List regions = + assignmentSnapshot.getTableToRegionMap().get(tableName); + int numRegions = regions.size(); + + // Get the current assignment map + Map currentAssignmentMap = + assignmentSnapshot.getRegionToRegionServerMap(); + + // Get the all the region servers + List servers = new ArrayList(); + servers.addAll(getHBaseAdmin().getClusterStatus().getServers()); + + LOG.info("Start to generate assignment plan for " + numRegions + + " regions from table " + tableName + " with " + + servers.size() + " region servers"); + + int slotsPerServer = (int) Math.ceil((float) numRegions / + servers.size()); + int regionSlots = slotsPerServer * servers.size(); + + // Compute the primary, secondary and tertiary costs for each region/server + // pair. These costs are based only on node locality and rack locality, and + // will be modified later. + float[][] primaryCost = new float[numRegions][regionSlots]; + float[][] secondaryCost = new float[numRegions][regionSlots]; + float[][] tertiaryCost = new float[numRegions][regionSlots]; + + if (this.enforceLocality && regionLocalityMap != null) { + // Transform the locality mapping into a 2D array, assuming that any + // unspecified locality value is 0. + float[][] localityPerServer = new float[numRegions][regionSlots]; + for (int i = 0; i < numRegions; i++) { + Map serverLocalityMap = + regionLocalityMap.get(regions.get(i).getEncodedName()); + if (serverLocalityMap == null) { + continue; + } + for (int j = 0; j < servers.size(); j++) { + String serverName = servers.get(j).getHostname(); + if (serverName == null) { + continue; + } + Float locality = serverLocalityMap.get(serverName); + if (locality == null) { + continue; + } + for (int k = 0; k < slotsPerServer; k++) { + // If we can't find the locality of a region to a server, which occurs + // because locality is only reported for servers which have some + // blocks of a region local, then the locality for that pair is 0. + localityPerServer[i][j * slotsPerServer + k] = locality.floatValue(); + } + } + } + + // Compute the total rack locality for each region in each rack. The total + // rack locality is the sum of the localities of a region on all servers in + // a rack. + Map> rackRegionLocality = + new HashMap>(); + for (int i = 0; i < numRegions; i++) { + HRegionInfo region = regions.get(i); + for (int j = 0; j < regionSlots; j += slotsPerServer) { + String rack = rackManager.getRack(servers.get(j / slotsPerServer)); + Map rackLocality = rackRegionLocality.get(rack); + if (rackLocality == null) { + rackLocality = new HashMap(); + rackRegionLocality.put(rack, rackLocality); + } + Float localityObj = rackLocality.get(region); + float locality = localityObj == null ? 0 : localityObj.floatValue(); + locality += localityPerServer[i][j]; + rackLocality.put(region, locality); + } + } + for (int i = 0; i < numRegions; i++) { + for (int j = 0; j < regionSlots; j++) { + String rack = rackManager.getRack(servers.get(j / slotsPerServer)); + Float totalRackLocalityObj = + rackRegionLocality.get(rack).get(regions.get(i)); + float totalRackLocality = totalRackLocalityObj == null ? + 0 : totalRackLocalityObj.floatValue(); + + // Primary cost aims to favor servers with high node locality and low + // rack locality, so that secondaries and tertiaries can be chosen for + // nodes with high rack locality. 
This might give primaries with + // slightly less locality at first compared to a cost which only + // considers the node locality, but should be better in the long run. + primaryCost[i][j] = 1 - (2 * localityPerServer[i][j] - + totalRackLocality); + + // Secondary cost aims to favor servers with high node locality and high + // rack locality since the tertiary will be chosen from the same rack as + // the secondary. This could be negative, but that is okay. + secondaryCost[i][j] = 2 - (localityPerServer[i][j] + totalRackLocality); + + // Tertiary cost is only concerned with the node locality. It will later + // be restricted to only hosts on the same rack as the secondary. + tertiaryCost[i][j] = 1 - localityPerServer[i][j]; + } + } + } + + if (this.enforceMinAssignmentMove && currentAssignmentMap != null) { + // We want to minimize the number of regions which move as the result of a + // new assignment. Therefore, slightly penalize any placement which is for + // a host that is not currently serving the region. + for (int i = 0; i < numRegions; i++) { + for (int j = 0; j < servers.size(); j++) { + ServerName currentAddress = currentAssignmentMap.get(regions.get(i)); + if (currentAddress != null && + !currentAddress.equals(servers.get(j))) { + for (int k = 0; k < slotsPerServer; k++) { + primaryCost[i][j * slotsPerServer + k] += NOT_CURRENT_HOST_PENALTY; + } + } + } + } + } + + // Artificially increase cost of last slot of each server to evenly + // distribute the slop, otherwise there will be a few servers with too few + // regions and many servers with the max number of regions. + for (int i = 0; i < numRegions; i++) { + for (int j = 0; j < regionSlots; j += slotsPerServer) { + primaryCost[i][j] += LAST_SLOT_COST_PENALTY; + secondaryCost[i][j] += LAST_SLOT_COST_PENALTY; + tertiaryCost[i][j] += LAST_SLOT_COST_PENALTY; + } + } + + RandomizedMatrix randomizedMatrix = new RandomizedMatrix(numRegions, + regionSlots); + primaryCost = randomizedMatrix.transform(primaryCost); + int[] primaryAssignment = new MunkresAssignment(primaryCost).solve(); + primaryAssignment = randomizedMatrix.invertIndices(primaryAssignment); + + // Modify the secondary and tertiary costs for each region/server pair to + // prevent a region from being assigned to the same rack for both primary + // and either one of secondary or tertiary. + for (int i = 0; i < numRegions; i++) { + int slot = primaryAssignment[i]; + String rack = rackManager.getRack(servers.get(slot / slotsPerServer)); + for (int k = 0; k < servers.size(); k++) { + if (!rackManager.getRack(servers.get(k)).equals(rack)) { + continue; + } + if (k == slot / slotsPerServer) { + // Same node, do not place secondary or tertiary here ever. + for (int m = 0; m < slotsPerServer; m++) { + secondaryCost[i][k * slotsPerServer + m] = MAX_COST; + tertiaryCost[i][k * slotsPerServer + m] = MAX_COST; + } + } else { + // Same rack, do not place secondary or tertiary here if possible. 
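+ // AVOID_COST rather than MAX_COST: a same-rack (but different-node)
+ // secondary or tertiary is only discouraged, so the solver can still fall
+ // back to it when no slot on another rack is free (for instance on a
+ // single-rack cluster). The same-node case above uses MAX_COST because a
+ // server must never appear twice in a region's favored-node list.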
+ for (int m = 0; m < slotsPerServer; m++) { + secondaryCost[i][k * slotsPerServer + m] = AVOID_COST; + tertiaryCost[i][k * slotsPerServer + m] = AVOID_COST; + } + } + } + } + if (munkresForSecondaryAndTertiary) { + randomizedMatrix = new RandomizedMatrix(numRegions, regionSlots); + secondaryCost = randomizedMatrix.transform(secondaryCost); + int[] secondaryAssignment = new MunkresAssignment(secondaryCost).solve(); + secondaryAssignment = randomizedMatrix.invertIndices(secondaryAssignment); + + // Modify the tertiary costs for each region/server pair to ensure that a + // region is assigned to a tertiary server on the same rack as its secondary + // server, but not the same server in that rack. + for (int i = 0; i < numRegions; i++) { + int slot = secondaryAssignment[i]; + String rack = rackManager.getRack(servers.get(slot / slotsPerServer)); + for (int k = 0; k < servers.size(); k++) { + if (k == slot / slotsPerServer) { + // Same node, do not place tertiary here ever. + for (int m = 0; m < slotsPerServer; m++) { + tertiaryCost[i][k * slotsPerServer + m] = MAX_COST; + } + } else { + if (rackManager.getRack(servers.get(k)).equals(rack)) { + continue; + } + // Different rack, do not place tertiary here if possible. + for (int m = 0; m < slotsPerServer; m++) { + tertiaryCost[i][k * slotsPerServer + m] = AVOID_COST; + } + } + } + } + + randomizedMatrix = new RandomizedMatrix(numRegions, regionSlots); + tertiaryCost = randomizedMatrix.transform(tertiaryCost); + int[] tertiaryAssignment = new MunkresAssignment(tertiaryCost).solve(); + tertiaryAssignment = randomizedMatrix.invertIndices(tertiaryAssignment); + + for (int i = 0; i < numRegions; i++) { + List favoredServers = + new ArrayList(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM); + ServerName s = servers.get(primaryAssignment[i] / slotsPerServer); + favoredServers.add(new ServerName(s.getHostname(), s.getPort(), + ServerName.NON_STARTCODE)); + + s = servers.get(secondaryAssignment[i] / slotsPerServer); + favoredServers.add(new ServerName(s.getHostname(), s.getPort(), + ServerName.NON_STARTCODE)); + + s = servers.get(tertiaryAssignment[i] / slotsPerServer); + favoredServers.add(new ServerName(s.getHostname(), s.getPort(), + ServerName.NON_STARTCODE)); + // Update the assignment plan + plan.updateAssignmentPlan(regions.get(i), favoredServers); + } + LOG.info("Generated the assignment plan for " + numRegions + + " regions from table " + tableName + " with " + + servers.size() + " region servers"); + LOG.info("Assignment plan for secondary and tertiary generated " + + "using MunkresAssignment"); + } else { + Map primaryRSMap = new HashMap(); + for (int i = 0; i < numRegions; i++) { + primaryRSMap.put(regions.get(i), servers.get(primaryAssignment[i] / slotsPerServer)); + } + FavoredNodeAssignmentHelper favoredNodeHelper = + new FavoredNodeAssignmentHelper(servers, conf); + favoredNodeHelper.initialize(); + Map secondaryAndTertiaryMap = + favoredNodeHelper.placeSecondaryAndTertiaryWithRestrictions(primaryRSMap); + for (int i = 0; i < numRegions; i++) { + List favoredServers = + new ArrayList(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM); + HRegionInfo currentRegion = regions.get(i); + ServerName s = primaryRSMap.get(currentRegion); + favoredServers.add(new ServerName(s.getHostname(), s.getPort(), + ServerName.NON_STARTCODE)); + + ServerName[] secondaryAndTertiary = + secondaryAndTertiaryMap.get(currentRegion); + s = secondaryAndTertiary[0]; + favoredServers.add(new ServerName(s.getHostname(), s.getPort(), + ServerName.NON_STARTCODE)); + + s = 
secondaryAndTertiary[1]; + favoredServers.add(new ServerName(s.getHostname(), s.getPort(), + ServerName.NON_STARTCODE)); + // Update the assignment plan + plan.updateAssignmentPlan(regions.get(i), favoredServers); + } + LOG.info("Generated the assignment plan for " + numRegions + + " regions from table " + tableName + " with " + + servers.size() + " region servers"); + LOG.info("Assignment plan for secondary and tertiary generated " + + "using placeSecondaryAndTertiaryWithRestrictions method"); + } + } + + public FavoredNodesPlan getNewAssignmentPlan() throws IOException { + // Get the current region assignment snapshot by scanning from the META + SnapshotOfRegionAssignmentFromMeta assignmentSnapshot = + this.getRegionAssignmentSnapshot(); + + // Get the region locality map + Map> regionLocalityMap = null; + if (this.enforceLocality) { + regionLocalityMap = FSUtils.getRegionDegreeLocalityMappingFromFS(conf); + } + // Initialize the assignment plan + FavoredNodesPlan plan = new FavoredNodesPlan(); + + // Get the table to region mapping + Map> tableToRegionMap = + assignmentSnapshot.getTableToRegionMap(); + LOG.info("Start to generate the new assignment plan for the " + + + tableToRegionMap.keySet().size() + " tables" ); + for (TableName table : tableToRegionMap.keySet()) { + try { + if (!this.targetTableSet.isEmpty() && + !this.targetTableSet.contains(table)) { + continue; + } + // TODO: maybe run the placement in parallel for each table + genAssignmentPlan(table, assignmentSnapshot, regionLocalityMap, plan, + USE_MUNKRES_FOR_PLACING_SECONDARY_AND_TERTIARY); + } catch (Exception e) { + LOG.error("Get some exceptions for placing primary region server" + + "for table " + table + " because " + e); + } + } + LOG.info("Finish to generate the new assignment plan for the " + + + tableToRegionMap.keySet().size() + " tables" ); + return plan; + } + + /** + * Some algorithms for solving the assignment problem may traverse workers or + * jobs in linear order which may result in skewing the assignments of the + * first jobs in the matrix toward the last workers in the matrix if the + * costs are uniform. To avoid this kind of clumping, we can randomize the + * rows and columns of the cost matrix in a reversible way, such that the + * solution to the assignment problem can be interpreted in terms of the + * original untransformed cost matrix. Rows and columns are transformed + * independently such that the elements contained in any row of the input + * matrix are the same as the elements in the corresponding output matrix, + * and each row has its elements transformed in the same way. Similarly for + * columns. + */ + protected static class RandomizedMatrix { + private final int rows; + private final int cols; + private final int[] rowTransform; + private final int[] rowInverse; + private final int[] colTransform; + private final int[] colInverse; + + /** + * Create a randomization scheme for a matrix of a given size. + * @param rows the number of rows in the matrix + * @param cols the number of columns in the matrix + */ + public RandomizedMatrix(int rows, int cols) { + this.rows = rows; + this.cols = cols; + Random random = new Random(); + rowTransform = new int[rows]; + rowInverse = new int[rows]; + for (int i = 0; i < rows; i++) { + rowTransform[i] = i; + } + // Shuffle the row indices. + for (int i = rows - 1; i >= 0; i--) { + int r = random.nextInt(i + 1); + int temp = rowTransform[r]; + rowTransform[r] = rowTransform[i]; + rowTransform[i] = temp; + } + // Generate the inverse row indices. 
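+ // Example: with rows == 3, if the shuffle above produced
+ // rowTransform = {2, 0, 1} (i.e. transform() moves original row i to row
+ // rowTransform[i]), this loop fills rowInverse = {1, 2, 0}, mapping each
+ // shuffled row index back to the original row it came from.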
+ for (int i = 0; i < rows; i++) { + rowInverse[rowTransform[i]] = i; + } + + colTransform = new int[cols]; + colInverse = new int[cols]; + for (int i = 0; i < cols; i++) { + colTransform[i] = i; + } + // Shuffle the column indices. + for (int i = cols - 1; i >= 0; i--) { + int r = random.nextInt(i + 1); + int temp = colTransform[r]; + colTransform[r] = colTransform[i]; + colTransform[i] = temp; + } + // Generate the inverse column indices. + for (int i = 0; i < cols; i++) { + colInverse[colTransform[i]] = i; + } + } + + /** + * Copy a given matrix into a new matrix, transforming each row index and + * each column index according to the randomization scheme that was created + * at construction time. + * @param matrix the cost matrix to transform + * @return a new matrix with row and column indices transformed + */ + public float[][] transform(float[][] matrix) { + float[][] result = new float[rows][cols]; + for (int i = 0; i < rows; i++) { + for (int j = 0; j < cols; j++) { + result[rowTransform[i]][colTransform[j]] = matrix[i][j]; + } + } + return result; + } + + /** + * Copy a given matrix into a new matrix, transforming each row index and + * each column index according to the inverse of the randomization scheme + * that was created at construction time. + * @param matrix the cost matrix to be inverted + * @return a new matrix with row and column indices inverted + */ + public float[][] invert(float[][] matrix) { + float[][] result = new float[rows][cols]; + for (int i = 0; i < rows; i++) { + for (int j = 0; j < cols; j++) { + result[rowInverse[i]][colInverse[j]] = matrix[i][j]; + } + } + return result; + } + + /** + * Given an array where each element {@code indices[i]} represents the + * randomized column index corresponding to randomized row index {@code i}, + * create a new array with the corresponding inverted indices. + * @param indices an array of transformed indices to be inverted + * @return an array of inverted indices + */ + public int[] invertIndices(int[] indices) { + int[] result = new int[indices.length]; + for (int i = 0; i < indices.length; i++) { + result[rowInverse[i]] = colInverse[indices[i]]; + } + return result; + } + } + + /** + * Print the assignment plan to the system output stream + * @param plan + */ + public static void printAssignmentPlan(FavoredNodesPlan plan) { + if (plan == null) return; + LOG.info("========== Start to print the assignment plan ================"); + // sort the map based on region info + Map> assignmentMap = + new TreeMap>(plan.getAssignmentMap()); + + for (Map.Entry> entry : assignmentMap.entrySet()) { + + String serverList = FavoredNodeAssignmentHelper.getFavoredNodesAsString(entry.getValue()); + String regionName = entry.getKey().getRegionNameAsString(); + LOG.info("Region: " + regionName ); + LOG.info("Its favored nodes: " + serverList); + } + LOG.info("========== Finish to print the assignment plan ================"); + } + + /** + * Update the assignment plan into .META. + * @param plan the assignments plan to be updated into .META. + * @throws IOException if cannot update assignment plan in .META. 
+ */ + public void updateAssignmentPlanToMeta(FavoredNodesPlan plan) + throws IOException { + try { + LOG.info("Start to update the META with the new assignment plan"); + Map> assignmentMap = + plan.getAssignmentMap(); + FavoredNodeAssignmentHelper.updateMetaWithFavoredNodesInfo(assignmentMap, conf); + LOG.info("Updated the META with the new assignment plan"); + } catch (Exception e) { + LOG.error("Failed to update META with the new assignment" + + "plan because " + e.getMessage()); + } + } + + /** + * Update the assignment plan to all the region servers + * @param plan + * @throws IOException + */ + private void updateAssignmentPlanToRegionServers(FavoredNodesPlan plan) + throws IOException{ + LOG.info("Start to update the region servers with the new assignment plan"); + // Get the region to region server map + Map> currentAssignment = + this.getRegionAssignmentSnapshot().getRegionServerToRegionMap(); + HConnection connection = this.getHBaseAdmin().getConnection(); + + // track of the failed and succeeded updates + int succeededNum = 0; + Map failedUpdateMap = + new HashMap(); + + for (Map.Entry> entry : + currentAssignment.entrySet()) { + List>> regionUpdateInfos = + new ArrayList>>(); + try { + // Keep track of the favored updates for the current region server + FavoredNodesPlan singleServerPlan = null; + // Find out all the updates for the current region server + for (HRegionInfo region : entry.getValue()) { + List favoredServerList = plan.getFavoredNodes(region); + if (favoredServerList != null && + favoredServerList.size() == FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) { + // Create the single server plan if necessary + if (singleServerPlan == null) { + singleServerPlan = new FavoredNodesPlan(); + } + // Update the single server update + singleServerPlan.updateAssignmentPlan(region, favoredServerList); + } + regionUpdateInfos.add( + new Pair>(region, favoredServerList)); + } + if (singleServerPlan != null) { + // Update the current region server with its updated favored nodes + BlockingInterface currentRegionServer = connection.getAdmin(entry.getKey()); + UpdateFavoredNodesRequest request = + RequestConverter.buildUpdateFavoredNodesRequest(regionUpdateInfos); + + UpdateFavoredNodesResponse updateFavoredNodesResponse = + currentRegionServer.updateFavoredNodes(null, request); + LOG.info("Region server " + + ProtobufUtil.getServerInfo(currentRegionServer).getServerName() + + " has updated " + updateFavoredNodesResponse.getResponse() + " / " + + singleServerPlan.getAssignmentMap().size() + + " regions with the assignment plan"); + succeededNum ++; + } + } catch (Exception e) { + failedUpdateMap.put(entry.getKey(), e); + } + } + // log the succeeded updates + LOG.info("Updated " + succeededNum + " region servers with " + + "the new assignment plan"); + + // log the failed updates + int failedNum = failedUpdateMap.size(); + if (failedNum != 0) { + LOG.error("Failed to update the following + " + failedNum + + " region servers with its corresponding favored nodes"); + for (Map.Entry entry : + failedUpdateMap.entrySet() ) { + LOG.error("Failed to update " + entry.getKey().getHostAndPort() + + " because of " + entry.getValue().getMessage()); + } + } + } + + public void updateAssignmentPlan(FavoredNodesPlan plan) + throws IOException { + LOG.info("Start to update the new assignment plan for the META table and" + + " the region servers"); + // Update the new assignment plan to META + updateAssignmentPlanToMeta(plan); + // Update the new assignment plan to Region Servers + 
updateAssignmentPlanToRegionServers(plan); + LOG.info("Finish to update the new assignment plan for the META table and" + + " the region servers"); + } + + /** + * Return how many regions will move per table since their primary RS will + * change + * + * @param newPlan - new AssignmentPlan + * @return how many primaries will move per table + */ + public Map getRegionsMovement(FavoredNodesPlan newPlan) + throws IOException { + Map movesPerTable = new HashMap(); + SnapshotOfRegionAssignmentFromMeta snapshot = this.getRegionAssignmentSnapshot(); + Map> tableToRegions = snapshot + .getTableToRegionMap(); + FavoredNodesPlan oldPlan = snapshot.getExistingAssignmentPlan(); + Set tables = snapshot.getTableSet(); + for (TableName table : tables) { + int movedPrimaries = 0; + if (!this.targetTableSet.isEmpty() + && !this.targetTableSet.contains(table)) { + continue; + } + List regions = tableToRegions.get(table); + for (HRegionInfo region : regions) { + List oldServers = oldPlan.getFavoredNodes(region); + List newServers = newPlan.getFavoredNodes(region); + if (oldServers != null && newServers != null) { + ServerName oldPrimary = oldServers.get(0); + ServerName newPrimary = newServers.get(0); + if (oldPrimary.compareTo(newPrimary) != 0) { + movedPrimaries++; + } + } + } + movesPerTable.put(table, movedPrimaries); + } + return movesPerTable; + } + + /** + * Compares two plans and check whether the locality dropped or increased + * (prints the information as a string) also prints the baseline locality + * + * @param movesPerTable - how many primary regions will move per table + * @param regionLocalityMap - locality map from FS + * @param newPlan - new assignment plan + * @throws IOException + */ + public void checkDifferencesWithOldPlan(Map movesPerTable, + Map> regionLocalityMap, FavoredNodesPlan newPlan) + throws IOException { + // localities for primary, secondary and tertiary + SnapshotOfRegionAssignmentFromMeta snapshot = this.getRegionAssignmentSnapshot(); + FavoredNodesPlan oldPlan = snapshot.getExistingAssignmentPlan(); + Set tables = snapshot.getTableSet(); + Map> tableToRegionsMap = snapshot.getTableToRegionMap(); + for (TableName table : tables) { + float[] deltaLocality = new float[3]; + float[] locality = new float[3]; + if (!this.targetTableSet.isEmpty() + && !this.targetTableSet.contains(table)) { + continue; + } + List regions = tableToRegionsMap.get(table); + System.out.println("=================================================="); + System.out.println("Assignment Plan Projection Report For Table: " + table); + System.out.println("\t Total regions: " + regions.size()); + System.out.println("\t" + movesPerTable.get(table) + + " primaries will move due to their primary has changed"); + for (HRegionInfo currentRegion : regions) { + Map regionLocality = regionLocalityMap.get(currentRegion + .getEncodedName()); + if (regionLocality == null) { + continue; + } + List oldServers = oldPlan.getFavoredNodes(currentRegion); + List newServers = newPlan.getFavoredNodes(currentRegion); + if (newServers != null && oldServers != null) { + int i=0; + for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) { + ServerName newServer = newServers.get(p.ordinal()); + ServerName oldServer = oldServers.get(p.ordinal()); + Float oldLocality = 0f; + if (oldServers != null) { + oldLocality = regionLocality.get(oldServer.getHostname()); + if (oldLocality == null) { + oldLocality = 0f; + } + locality[i] += oldLocality; + } + Float newLocality = regionLocality.get(newServer.getHostname()); + if 
(newLocality == null) { + newLocality = 0f; + } + deltaLocality[i] += newLocality - oldLocality; + i++; + } + } + } + DecimalFormat df = new java.text.DecimalFormat( "#.##"); + for (int i = 0; i < deltaLocality.length; i++) { + System.out.print("\t\t Baseline locality for "); + if (i == 0) { + System.out.print("primary "); + } else if (i == 1) { + System.out.print("secondary "); + } else if (i == 2) { + System.out.print("tertiary "); + } + System.out.println(df.format(100 * locality[i] / regions.size()) + "%"); + System.out.print("\t\t Locality will change with the new plan: "); + System.out.println(df.format(100 * deltaLocality[i] / regions.size()) + + "%"); + } + System.out.println("\t Baseline dispersion"); + printDispersionScores(table, snapshot, regions.size(), null, true); + System.out.println("\t Projected dispersion"); + printDispersionScores(table, snapshot, regions.size(), newPlan, true); + } + } + + public void printDispersionScores(TableName table, + SnapshotOfRegionAssignmentFromMeta snapshot, int numRegions, FavoredNodesPlan newPlan, + boolean simplePrint) { + if (!this.targetTableSet.isEmpty() && !this.targetTableSet.contains(table)) { + return; + } + AssignmentVerificationReport report = new AssignmentVerificationReport(); + report.fillUpDispersion(table, snapshot, newPlan); + List dispersion = report.getDispersionInformation(); + if (simplePrint) { + DecimalFormat df = new java.text.DecimalFormat("#.##"); + System.out.println("\tAvg dispersion score: " + + df.format(dispersion.get(0)) + " hosts;\tMax dispersion score: " + + df.format(dispersion.get(1)) + " hosts;\tMin dispersion score: " + + df.format(dispersion.get(2)) + " hosts;"); + } else { + LOG.info("For Table: " + table + " ; #Total Regions: " + numRegions + + " ; The average dispersion score is " + dispersion.get(0)); + } + } + + public void printLocalityAndDispersionForCurrentPlan( + Map> regionLocalityMap) throws IOException { + SnapshotOfRegionAssignmentFromMeta snapshot = this.getRegionAssignmentSnapshot(); + FavoredNodesPlan assignmentPlan = snapshot.getExistingAssignmentPlan(); + Set tables = snapshot.getTableSet(); + Map> tableToRegionsMap = snapshot + .getTableToRegionMap(); + for (TableName table : tables) { + float[] locality = new float[3]; + if (!this.targetTableSet.isEmpty() + && !this.targetTableSet.contains(table)) { + continue; + } + List regions = tableToRegionsMap.get(table); + for (HRegionInfo currentRegion : regions) { + Map regionLocality = regionLocalityMap.get(currentRegion + .getEncodedName()); + if (regionLocality == null) { + continue; + } + List servers = assignmentPlan.getFavoredNodes(currentRegion); + if (servers != null) { + int i = 0; + for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) { + ServerName server = servers.get(p.ordinal()); + Float currentLocality = 0f; + if (servers != null) { + currentLocality = regionLocality.get(server.getHostname()); + if (currentLocality == null) { + currentLocality = 0f; + } + locality[i] += currentLocality; + } + i++; + } + } + } + for (int i = 0; i < locality.length; i++) { + String copy = null; + if (i == 0) { + copy = "primary"; + } else if (i == 1) { + copy = "secondary"; + } else if (i == 2) { + copy = "tertiary" ; + } + float avgLocality = 100 * locality[i] / regions.size(); + LOG.info("For Table: " + table + " ; #Total Regions: " + regions.size() + + " ; The average locality for " + copy+ " is " + avgLocality + " %"); + } + printDispersionScores(table, snapshot, regions.size(), null, false); + } + } + + /** + * @param 
favoredNodesStr The String of favored nodes
+ * @return the list of ServerName parsed from the favored nodes String.
+ */
+ public static List getFavoredNodeList(String favoredNodesStr) {
+   String[] favoredNodesArray = StringUtils.split(favoredNodesStr, ",");
+   if (favoredNodesArray == null)
+     return null;
+
+   List serverList = new ArrayList();
+   for (String hostNameAndPort : favoredNodesArray) {
+     serverList.add(new ServerName(hostNameAndPort, ServerName.NON_STARTCODE));
+   }
+   return serverList;
+ }
+
+ public static void main(String args[]) throws IOException {
+   Options opt = new Options();
+   opt.addOption("w", "write", false, "write the assignments to META only");
+   opt.addOption("u", "update", false,
+     "update the assignments to META and RegionServers together");
+   opt.addOption("n", "dry-run", false, "do not write assignments to META");
+   opt.addOption("v", "verify", false, "verify current assignments against META");
+   opt.addOption("p", "print", false, "print the current assignment plan in META");
+   opt.addOption("h", "help", false, "print usage");
+   opt.addOption("d", "verification-details", false,
+     "print the details of verification report");
+
+   opt.addOption("zk", true, "to set the zookeeper quorum");
+   opt.addOption("fs", true, "to set HDFS");
+   opt.addOption("hbase_root", true, "to set hbase_root directory");
+
+   opt.addOption("overwrite", false,
+     "overwrite the favored nodes for a single region, " +
+     "for example: -update -r regionName -f server1:port,server2:port,server3:port");
+   opt.addOption("r", true, "The region name that needs to be updated");
+   opt.addOption("f", true, "The new favored nodes");
+
+   opt.addOption("tables", true,
+     "The list of table names split by ','; " +
+     "for example: -tables: t1,t2,...,tn");
+   opt.addOption("l", "locality", true, "enforce the maximum locality");
+   opt.addOption("m", "min-move", true, "enforce minimum assignment move");
+   opt.addOption("diff", false, "calculate difference between assignment plans");
+   opt.addOption("munkres", false,
+     "use munkres to place secondaries and tertiaries");
+   opt.addOption("ld", "locality-dispersion", false, "print locality and dispersion " +
+     "information for current plan");
+   try {
+     // Set the log4j levels
+     Logger.getLogger("org.apache.zookeeper").setLevel(Level.ERROR);
+     Logger.getLogger("org.apache.hadoop.hbase").setLevel(Level.ERROR);
+     Logger.getLogger("org.apache.hadoop.hbase.master.RegionPlacementMaintainer")
+       .setLevel(Level.INFO);
+
+     CommandLine cmd = new GnuParser().parse(opt, args);
+     Configuration conf = HBaseConfiguration.create();
+
+     boolean enforceMinAssignmentMove = true;
+     boolean enforceLocality = true;
+     boolean verificationDetails = false;
+
+     // Read all the options
+     if ((cmd.hasOption("l") &&
+       cmd.getOptionValue("l").equalsIgnoreCase("false")) ||
+       (cmd.hasOption("locality") &&
+       cmd.getOptionValue("locality").equalsIgnoreCase("false"))) {
+       enforceLocality = false;
+     }
+
+     if ((cmd.hasOption("m") &&
+       cmd.getOptionValue("m").equalsIgnoreCase("false")) ||
+       (cmd.hasOption("min-move") &&
+       cmd.getOptionValue("min-move").equalsIgnoreCase("false"))) {
+       enforceMinAssignmentMove = false;
+     }
+
+     if (cmd.hasOption("zk")) {
+       conf.set(HConstants.ZOOKEEPER_QUORUM, cmd.getOptionValue("zk"));
+       LOG.info("Setting the zk quorum: " + conf.get(HConstants.ZOOKEEPER_QUORUM));
+     }
+
+     if (cmd.hasOption("fs")) {
+       conf.set(FileSystem.FS_DEFAULT_NAME_KEY, cmd.getOptionValue("fs"));
+       LOG.info("Setting the HDFS: " + conf.get(FileSystem.FS_DEFAULT_NAME_KEY));
+     }
+
+     if
(cmd.hasOption("hbase_root")) { + conf.set(HConstants.HBASE_DIR, cmd.getOptionValue("hbase_root")); + LOG.info("Setting the hbase root directory: " + conf.get(HConstants.HBASE_DIR)); + } + + // Create the region placement obj + RegionPlacementMaintainer rp = new RegionPlacementMaintainer(conf, enforceLocality, + enforceMinAssignmentMove); + + if (cmd.hasOption("d") || cmd.hasOption("verification-details")) { + verificationDetails = true; + } + + if (cmd.hasOption("tables")) { + String tableNameListStr = cmd.getOptionValue("tables"); + String[] tableNames = StringUtils.split(tableNameListStr, ","); + rp.setTargetTableName(tableNames); + } + + if (cmd.hasOption("munkres")) { + USE_MUNKRES_FOR_PLACING_SECONDARY_AND_TERTIARY = true; + } + + // Read all the modes + if (cmd.hasOption("v") || cmd.hasOption("verify")) { + // Verify the region placement. + rp.verifyRegionPlacement(verificationDetails); + } else if (cmd.hasOption("n") || cmd.hasOption("dry-run")) { + // Generate the assignment plan only without updating the META and RS + FavoredNodesPlan plan = rp.getNewAssignmentPlan(); + printAssignmentPlan(plan); + } else if (cmd.hasOption("w") || cmd.hasOption("write")) { + // Generate the new assignment plan + FavoredNodesPlan plan = rp.getNewAssignmentPlan(); + // Print the new assignment plan + printAssignmentPlan(plan); + // Write the new assignment plan to META + rp.updateAssignmentPlanToMeta(plan); + } else if (cmd.hasOption("u") || cmd.hasOption("update")) { + // Generate the new assignment plan + FavoredNodesPlan plan = rp.getNewAssignmentPlan(); + // Print the new assignment plan + printAssignmentPlan(plan); + // Update the assignment to META and Region Servers + rp.updateAssignmentPlan(plan); + } else if (cmd.hasOption("diff")) { + FavoredNodesPlan newPlan = rp.getNewAssignmentPlan(); + Map> locality = FSUtils + .getRegionDegreeLocalityMappingFromFS(conf); + Map movesPerTable = rp.getRegionsMovement(newPlan); + rp.checkDifferencesWithOldPlan(movesPerTable, locality, newPlan); + System.out.println("Do you want to update the assignment plan? 
[y/n]"); + Scanner s = new Scanner(System.in); + String input = s.nextLine().trim(); + if (input.equals("y")) { + System.out.println("Updating assignment plan..."); + rp.updateAssignmentPlan(newPlan); + } + s.close(); + } else if (cmd.hasOption("ld")) { + Map> locality = FSUtils + .getRegionDegreeLocalityMappingFromFS(conf); + rp.printLocalityAndDispersionForCurrentPlan(locality); + } else if (cmd.hasOption("p") || cmd.hasOption("print")) { + FavoredNodesPlan plan = rp.getRegionAssignmentSnapshot().getExistingAssignmentPlan(); + printAssignmentPlan(plan); + } else if (cmd.hasOption("overwrite")) { + if (!cmd.hasOption("f") || !cmd.hasOption("r")) { + throw new IllegalArgumentException("Please specify: " + + " -update -r regionName -f server1:port,server2:port,server3:port"); + } + + String regionName = cmd.getOptionValue("r"); + String favoredNodesStr = cmd.getOptionValue("f"); + LOG.info("Going to update the region " + regionName + " with the new favored nodes " + + favoredNodesStr); + List favoredNodes = null; + HRegionInfo regionInfo = + rp.getRegionAssignmentSnapshot().getRegionNameToRegionInfoMap().get(regionName); + if (regionInfo == null) { + LOG.error("Cannot find the region " + regionName + " from the META"); + } else { + try { + favoredNodes = getFavoredNodeList(favoredNodesStr); + } catch (IllegalArgumentException e) { + LOG.error("Cannot parse the invalid favored nodes because " + e); + } + FavoredNodesPlan newPlan = new FavoredNodesPlan(); + newPlan.updateAssignmentPlan(regionInfo, favoredNodes); + rp.updateAssignmentPlan(newPlan); + } + } else { + printHelp(opt); + } + } catch (ParseException e) { + printHelp(opt); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java new file mode 100644 index 00000000000..80d2e270a9f --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java @@ -0,0 +1,217 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.catalog.CatalogTracker; +import org.apache.hadoop.hbase.catalog.MetaReader; +import org.apache.hadoop.hbase.catalog.MetaReader.Visitor; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper; +import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan; +import org.apache.hadoop.hbase.util.Pair; + +/** + * Used internally for reading meta and constructing datastructures that are + * then queried, for things like regions to regionservers, table to regions, etc. + * It also records the favored nodes mapping for regions. + * + */ +@InterfaceAudience.Private +public class SnapshotOfRegionAssignmentFromMeta { + private static final Log LOG = LogFactory.getLog(SnapshotOfRegionAssignmentFromMeta.class + .getName()); + + private CatalogTracker tracker; + + /** the table name to region map */ + private final Map> tableToRegionMap; + /** the region to region server map */ + //private final Map regionToRegionServerMap; + private Map regionToRegionServerMap; + /** the region name to region info map */ + private final Map regionNameToRegionInfoMap; + + /** the regionServer to region map */ + private final Map> regionServerToRegionMap; + /** the existing assignment plan in the META region */ + private final FavoredNodesPlan existingAssignmentPlan; + private final Set disabledTables; + private final boolean excludeOfflinedSplitParents; + + public SnapshotOfRegionAssignmentFromMeta(CatalogTracker tracker) { + this(tracker, new HashSet(), false); + } + + public SnapshotOfRegionAssignmentFromMeta(CatalogTracker tracker, Set disabledTables, + boolean excludeOfflinedSplitParents) { + this.tracker = tracker; + tableToRegionMap = new HashMap>(); + regionToRegionServerMap = new HashMap(); + regionServerToRegionMap = new HashMap>(); + regionNameToRegionInfoMap = new TreeMap(); + existingAssignmentPlan = new FavoredNodesPlan(); + this.disabledTables = disabledTables; + this.excludeOfflinedSplitParents = excludeOfflinedSplitParents; + } + + /** + * Initialize the region assignment snapshot by scanning the META table + * @throws IOException + */ + public void initialize() throws IOException { + LOG.info("Start to scan the META for the current region assignment " + + "snappshot"); + // TODO: at some point this code could live in the MetaReader + Visitor v = new Visitor() { + @Override + public boolean visit(Result result) throws IOException { + try { + if (result == null || result.isEmpty()) return true; + Pair regionAndServer = + HRegionInfo.getHRegionInfoAndServerName(result); + HRegionInfo hri = regionAndServer.getFirst(); + if (hri == null) return true; + if (hri.getTableName() == null) return true; + if (disabledTables.contains(hri.getTableName())) { + return true; + } + // Are we to include split parents in the list? 
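+ // A split parent region is offline and its data is served by its
+ // daughter regions, so callers that only care about currently-serving
+ // regions can ask for such parents to be left out of the snapshot.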
+ if (excludeOfflinedSplitParents && hri.isSplit()) return true; + // Add the current assignment to the snapshot + addAssignment(hri, regionAndServer.getSecond()); + addRegion(hri); + + // the code below is to handle favored nodes + byte[] favoredNodes = result.getValue(HConstants.CATALOG_FAMILY, + FavoredNodeAssignmentHelper.FAVOREDNODES_QUALIFIER); + if (favoredNodes == null) return true; + // Add the favored nodes into assignment plan + ServerName[] favoredServerList = + FavoredNodeAssignmentHelper.getFavoredNodesList(favoredNodes); + // Add the favored nodes into assignment plan + existingAssignmentPlan.updateFavoredNodesMap(hri, + Arrays.asList(favoredServerList)); + return true; + } catch (RuntimeException e) { + LOG.error("Catche remote exception " + e.getMessage() + + " when processing" + result); + throw e; + } + } + }; + // Scan .META. to pick up user regions + MetaReader.fullScan(tracker, v); + //regionToRegionServerMap = regions; + LOG.info("Finished to scan the META for the current region assignment" + + "snapshot"); + } + + private void addRegion(HRegionInfo regionInfo) { + // Process the region name to region info map + regionNameToRegionInfoMap.put(regionInfo.getRegionNameAsString(), regionInfo); + + // Process the table to region map + TableName tableName = regionInfo.getTableName(); + List regionList = tableToRegionMap.get(tableName); + if (regionList == null) { + regionList = new ArrayList(); + } + // Add the current region info into the tableToRegionMap + regionList.add(regionInfo); + tableToRegionMap.put(tableName, regionList); + } + + private void addAssignment(HRegionInfo regionInfo, ServerName server) { + // Process the region to region server map + regionToRegionServerMap.put(regionInfo, server); + + // Process the region server to region map + List regionList = regionServerToRegionMap.get(server); + if (regionList == null) { + regionList = new ArrayList(); + } + regionList.add(regionInfo); + regionServerToRegionMap.put(server, regionList); + } + + /** + * Get the regioninfo for a region + * @return the regioninfo + */ + public Map getRegionNameToRegionInfoMap() { + return this.regionNameToRegionInfoMap; + } + + /** + * Get regions for tables + * @return a mapping from table to regions + */ + public Map> getTableToRegionMap() { + return tableToRegionMap; + } + + /** + * Get region to region server map + * @return region to region server map + */ + public Map getRegionToRegionServerMap() { + return regionToRegionServerMap; + } + + /** + * Get regionserver to region map + * @return regionserver to region map + */ + public Map> getRegionServerToRegionMap() { + return regionServerToRegionMap; + } + + /** + * Get the favored nodes plan + * @return the existing favored nodes plan + */ + public FavoredNodesPlan getExistingAssignmentPlan() { + return this.existingAssignmentPlan; + } + + /** + * Get the table set + * @return the table set + */ + public Set getTableSet() { + return this.tableToRegionMap.keySet(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java index 370f8c052ad..5413d0ba81c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java @@ -21,14 +21,13 @@ package org.apache.hadoop.hbase.master.balancer; import java.io.IOException; 
import java.util.ArrayList; -import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Random; import java.util.Set; -import java.util.TreeMap; +import java.util.Map.Entry; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -40,17 +39,14 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.catalog.MetaEditor; -import org.apache.hadoop.hbase.catalog.MetaReader; -import org.apache.hadoop.hbase.catalog.MetaReader.Visitor; +import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.master.RackManager; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.Pair; import com.google.protobuf.InvalidProtocolBufferException; @@ -88,55 +84,11 @@ public class FavoredNodeAssignmentHelper { } /** - * Perform full scan of the meta table similar to - * {@link MetaReader#fullScan(CatalogTracker, Set, boolean)} except that this is - * aware of the favored nodes + * Update meta table with favored nodes info + * @param regionToFavoredNodes * @param catalogTracker - * @param disabledTables - * @param excludeOfflinedSplitParents - * @param balancer required because we need to let the balancer know about the - * current favored nodes from meta scan - * @return Returns a map of every region to it's currently assigned server, - * according to META. If the region does not have an assignment it will have - * a null value in the map. * @throws IOException */ - public static Map fullScan( - CatalogTracker catalogTracker, final Set disabledTables, - final boolean excludeOfflinedSplitParents, - FavoredNodeLoadBalancer balancer) throws IOException { - final Map regions = - new TreeMap(); - final Map favoredNodesMap = - new HashMap(); - Visitor v = new Visitor() { - @Override - public boolean visit(Result r) throws IOException { - if (r == null || r.isEmpty()) return true; - Pair region = HRegionInfo.getHRegionInfoAndServerName(r); - HRegionInfo hri = region.getFirst(); - if (hri == null) return true; - if (hri.getTableName() == null) return true; - if (disabledTables.contains( - hri.getTableName())) return true; - // Are we to include split parents in the list? 
- if (excludeOfflinedSplitParents && hri.isSplitParent()) return true; - regions.put(hri, region.getSecond()); - byte[] favoredNodes = r.getValue(HConstants.CATALOG_FAMILY, - FavoredNodeAssignmentHelper.FAVOREDNODES_QUALIFIER); - if (favoredNodes != null) { - ServerName[] favoredServerList = - FavoredNodeAssignmentHelper.getFavoredNodesList(favoredNodes); - favoredNodesMap.put(hri, favoredServerList); - } - return true; - } - }; - MetaReader.fullScan(catalogTracker, v); - balancer.noteFavoredNodes(favoredNodesMap); - return regions; - } - public static void updateMetaWithFavoredNodesInfo( Map> regionToFavoredNodes, CatalogTracker catalogTracker) throws IOException { @@ -151,6 +103,33 @@ public class FavoredNodeAssignmentHelper { LOG.info("Added " + puts.size() + " regions in META"); } + /** + * Update meta table with favored nodes info + * @param regionToFavoredNodes + * @param conf + * @throws IOException + */ + public static void updateMetaWithFavoredNodesInfo( + Map> regionToFavoredNodes, + Configuration conf) throws IOException { + List puts = new ArrayList(); + for (Map.Entry> entry : regionToFavoredNodes.entrySet()) { + Put put = makePutFromRegionInfo(entry.getKey(), entry.getValue()); + if (put != null) { + puts.add(put); + } + } + // Write the region assignments to the meta table. + HTable metaTable = null; + try { + metaTable = new HTable(conf, TableName.META_TABLE_NAME); + metaTable.put(puts); + } finally { + if (metaTable != null) metaTable.close(); + } + LOG.info("Added " + puts.size() + " regions in META"); + } + /** * Generates and returns a Put containing the region info for the catalog table * and the servers @@ -190,10 +169,10 @@ public class FavoredNodeAssignmentHelper { } /** - * @param serverList + * @param serverAddrList * @return PB'ed bytes of {@link FavoredNodes} generated by the server list. */ - static byte[] getFavoredNodes(List serverAddrList) { + public static byte[] getFavoredNodes(List serverAddrList) { FavoredNodes.Builder f = FavoredNodes.newBuilder(); for (ServerName s : serverAddrList) { HBaseProtos.ServerName.Builder b = HBaseProtos.ServerName.newBuilder(); @@ -296,13 +275,164 @@ public class FavoredNodeAssignmentHelper { } } catch (Exception e) { LOG.warn("Cannot place the favored nodes for region " + - regionInfo.getRegionNameAsString() + " because " + e); + regionInfo.getRegionNameAsString() + " because " + e, e); continue; } } return secondaryAndTertiaryMap; } + private Map> mapRSToPrimaries( + Map primaryRSMap) { + Map> primaryServerMap = + new HashMap>(); + for (Entry e : primaryRSMap.entrySet()) { + Set currentSet = primaryServerMap.get(e.getValue()); + if (currentSet == null) { + currentSet = new HashSet(); + } + currentSet.add(e.getKey()); + primaryServerMap.put(e.getValue(), currentSet); + } + return primaryServerMap; + } + + /** + * For regions that share the primary, avoid placing the secondary and tertiary + * on a same RS. 
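+   * For example, if two regions both have server A as their primary, this
+   * method tries to give them different secondary servers within the chosen
+   * rack rather than stacking both secondaries on the same node.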
Used for generating new assignments for the + * primary/secondary/tertiary RegionServers + * @param primaryRSMap + * @return the map of regions to the servers the region-files should be hosted on + * @throws IOException + */ + public Map placeSecondaryAndTertiaryWithRestrictions( + Map primaryRSMap) { + Map> serverToPrimaries = + mapRSToPrimaries(primaryRSMap); + Map secondaryAndTertiaryMap = + new HashMap(); + + for (Entry entry : primaryRSMap.entrySet()) { + // Get the target region and its primary region server rack + HRegionInfo regionInfo = entry.getKey(); + ServerName primaryRS = entry.getValue(); + try { + // Get the rack for the primary region server + String primaryRack = rackManager.getRack(primaryRS); + ServerName[] favoredNodes = null; + if (getTotalNumberOfRacks() == 1) { + // Single rack case: have to pick the secondary and tertiary + // from the same rack + favoredNodes = singleRackCase(regionInfo, primaryRS, primaryRack); + } else { + favoredNodes = multiRackCaseWithRestrictions(serverToPrimaries, + secondaryAndTertiaryMap, primaryRack, primaryRS, regionInfo); + } + if (favoredNodes != null) { + secondaryAndTertiaryMap.put(regionInfo, favoredNodes); + LOG.debug("Place the secondary and tertiary region server for region " + + regionInfo.getRegionNameAsString()); + } + } catch (Exception e) { + LOG.warn("Cannot place the favored nodes for region " + + regionInfo.getRegionNameAsString() + " because " + e, e); + continue; + } + } + return secondaryAndTertiaryMap; + } + + private ServerName[] multiRackCaseWithRestrictions( + Map> serverToPrimaries, + Map secondaryAndTertiaryMap, + String primaryRack, ServerName primaryRS, HRegionInfo regionInfo) throws IOException { + // Random to choose the secondary and tertiary region server + // from another rack to place the secondary and tertiary + // Random to choose one rack except for the current rack + Set rackSkipSet = new HashSet(); + rackSkipSet.add(primaryRack); + String secondaryRack = getOneRandomRack(rackSkipSet); + List serverList = getServersFromRack(secondaryRack); + Set serverSet = new HashSet(); + serverSet.addAll(serverList); + ServerName[] favoredNodes; + if (serverList.size() >= 2) { + // Randomly pick up two servers from this secondary rack + // Skip the secondary for the tertiary placement + // skip the servers which share the primary already + Set primaries = serverToPrimaries.get(primaryRS); + Set skipServerSet = new HashSet(); + while (true) { + ServerName[] secondaryAndTertiary = null; + if (primaries.size() > 1) { + // check where his tertiary and secondary are + for (HRegionInfo primary : primaries) { + secondaryAndTertiary = secondaryAndTertiaryMap.get(primary); + if (secondaryAndTertiary != null) { + if (regionServerToRackMap.get(secondaryAndTertiary[0]).equals(secondaryRack)) { + skipServerSet.add(secondaryAndTertiary[0]); + } + if (regionServerToRackMap.get(secondaryAndTertiary[1]).equals(secondaryRack)) { + skipServerSet.add(secondaryAndTertiary[1]); + } + } + } + } + if (skipServerSet.size() + 2 <= serverSet.size()) + break; + skipServerSet.clear(); + rackSkipSet.add(secondaryRack); + // we used all racks + if (rackSkipSet.size() == getTotalNumberOfRacks()) { + // remove the last two added and break + skipServerSet.remove(secondaryAndTertiary[0]); + skipServerSet.remove(secondaryAndTertiary[1]); + break; + } + secondaryRack = getOneRandomRack(rackSkipSet); + serverList = getServersFromRack(secondaryRack); + serverSet = new HashSet(); + serverSet.addAll(serverList); + } + + // Place the secondary RS + 
ServerName secondaryRS = getOneRandomServer(secondaryRack, skipServerSet); + skipServerSet.add(secondaryRS); + // Place the tertiary RS + ServerName tertiaryRS = getOneRandomServer(secondaryRack, skipServerSet); + + if (secondaryRS == null || tertiaryRS == null) { + LOG.error("Cannot place the secondary and tertiary" + + " region server for region " + + regionInfo.getRegionNameAsString()); + } + // Create the secondary and tertiary pair + favoredNodes = new ServerName[2]; + favoredNodes[0] = secondaryRS; + favoredNodes[1] = tertiaryRS; + } else { + // Pick the secondary rs from this secondary rack + // and pick the tertiary from another random rack + favoredNodes = new ServerName[2]; + ServerName secondary = getOneRandomServer(secondaryRack); + favoredNodes[0] = secondary; + + // Pick the tertiary + if (getTotalNumberOfRacks() == 2) { + // Pick the tertiary from the same rack of the primary RS + Set serverSkipSet = new HashSet(); + serverSkipSet.add(primaryRS); + favoredNodes[1] = getOneRandomServer(primaryRack, serverSkipSet); + } else { + // Pick the tertiary from another rack + rackSkipSet.add(secondaryRack); + String tertiaryRandomRack = getOneRandomRack(rackSkipSet); + favoredNodes[1] = getOneRandomServer(tertiaryRandomRack); + } + } + return favoredNodes; + } + private ServerName[] singleRackCase(HRegionInfo regionInfo, ServerName primaryRS, String primaryRack) throws IOException { @@ -311,7 +441,7 @@ public class FavoredNodeAssignmentHelper { List serverList = getServersFromRack(primaryRack); if (serverList.size() <= 2) { // Single region server case: cannot not place the favored nodes - // on any server; !domain.canPlaceFavoredNodes() + // on any server; return null; } else { // Randomly select two region servers from the server list and make sure @@ -400,7 +530,7 @@ public class FavoredNodeAssignmentHelper { return (serverSize >= FAVORED_NODES_NUM); } - void initialize() { + public void initialize() { for (ServerName sn : this.servers) { String rackName = this.rackManager.getRack(sn); List serverList = this.rackToRegionServerMap.get(rackName); @@ -462,4 +592,14 @@ public class FavoredNodeAssignmentHelper { return randomRack; } + + public static String getFavoredNodesAsString(List nodes) { + StringBuffer strBuf = new StringBuffer(); + int i = 0; + for (ServerName node : nodes) { + strBuf.append(node.getHostAndPort()); + if (++i != nodes.size()) strBuf.append(";"); + } + return strBuf.toString(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java index c76ce447483..d8cee204677 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.master.balancer; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -30,12 +29,15 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.LoadBalancer; import 
org.apache.hadoop.hbase.master.RackManager; import org.apache.hadoop.hbase.master.RegionPlan; -import org.apache.hadoop.hbase.master.balancer.FavoredNodes.Position; +import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.master.SnapshotOfRegionAssignmentFromMeta; +import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan.Position; import org.apache.hadoop.hbase.util.Pair; /** @@ -55,23 +57,91 @@ import org.apache.hadoop.hbase.util.Pair; public class FavoredNodeLoadBalancer extends BaseLoadBalancer { private static final Log LOG = LogFactory.getLog(FavoredNodeLoadBalancer.class); - private FavoredNodes globalFavoredNodesAssignmentPlan; + private FavoredNodesPlan globalFavoredNodesAssignmentPlan; private RackManager rackManager; + Configuration conf; @Override public void setConf(Configuration conf) { - globalFavoredNodesAssignmentPlan = new FavoredNodes(); + globalFavoredNodesAssignmentPlan = new FavoredNodesPlan(); this.rackManager = new RackManager(conf); + this.conf = conf; } @Override - public List balanceCluster(Map> clusterState) { - //TODO. At a high level, this should look at the block locality per region, and - //then reassign regions based on which nodes have the most blocks of the region - //file(s). There could be different ways like minimize region movement, or, maximum - //locality, etc. The other dimension to look at is whether Stochastic loadbalancer - //can be integrated with this - throw new UnsupportedOperationException("Not implemented yet"); + public List balanceCluster(Map> clusterState) { + //TODO. Look at is whether Stochastic loadbalancer can be integrated with this + List plans = new ArrayList(); + //perform a scan of the meta to get the latest updates (if any) + SnapshotOfRegionAssignmentFromMeta snaphotOfRegionAssignment = + new SnapshotOfRegionAssignmentFromMeta(super.services.getCatalogTracker()); + try { + snaphotOfRegionAssignment.initialize(); + } catch (IOException ie) { + LOG.warn("Not running balancer since exception was thrown " + ie); + return plans; + } + globalFavoredNodesAssignmentPlan = snaphotOfRegionAssignment.getExistingAssignmentPlan(); + Map serverNameToServerNameWithoutCode = + new HashMap(); + Map serverNameWithoutCodeToServerName = + new HashMap(); + ServerManager serverMgr = super.services.getServerManager(); + for (ServerName sn: serverMgr.getOnlineServersList()) { + ServerName s = new ServerName(sn.getHostname(), sn.getPort(), ServerName.NON_STARTCODE); + serverNameToServerNameWithoutCode.put(sn, s); + serverNameWithoutCodeToServerName.put(s, sn); + } + for (Map.Entry> entry : clusterState.entrySet()) { + ServerName currentServer = entry.getKey(); + //get a server without the startcode for the currentServer + ServerName currentServerWithoutStartCode = new ServerName(currentServer.getHostname(), + currentServer.getPort(), ServerName.NON_STARTCODE); + List list = entry.getValue(); + for (HRegionInfo region : list) { + if(region.getTableName().getNamespaceAsString() + .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) { + continue; + } + List favoredNodes = globalFavoredNodesAssignmentPlan.getFavoredNodes(region); + if (favoredNodes == null || favoredNodes.get(0).equals(currentServerWithoutStartCode)) { + continue; //either favorednodes does not exist or we are already on the primary node + } + ServerName destination = null; + //check whether the primary is available + destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(0)); + if (destination == null) { + //check whether the region is 
on secondary/tertiary + if (currentServerWithoutStartCode.equals(favoredNodes.get(1)) || + currentServerWithoutStartCode.equals(favoredNodes.get(2))) { + continue; + } + //the region is currently on none of the favored nodes + //get it on one of them if possible + ServerLoad l1 = super.services.getServerManager().getLoad( + serverNameWithoutCodeToServerName.get(favoredNodes.get(1))); + ServerLoad l2 = super.services.getServerManager().getLoad( + serverNameWithoutCodeToServerName.get(favoredNodes.get(2))); + if (l1 != null && l2 != null) { + if (l1.getLoad() > l2.getLoad()) { + destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(2)); + } else { + destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(1)); + } + } else if (l1 != null) { + destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(1)); + } else if (l2 != null) { + destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(2)); + } + } + + if (destination != null) { + RegionPlan plan = new RegionPlan(region, currentServer, destination); + plans.add(plan); + } + } + } + return plans; } @Override @@ -168,8 +238,8 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer { for (ServerName s : favoredNodes) { ServerName serverWithLegitStartCode = availableServersContains(availableServers, s); if (serverWithLegitStartCode != null) { - FavoredNodes.Position position = - FavoredNodes.getFavoredServerPosition(favoredNodes, s); + FavoredNodesPlan.Position position = + FavoredNodesPlan.getFavoredServerPosition(favoredNodes, s); if (Position.PRIMARY.equals(position)) { primaryHost = serverWithLegitStartCode; } else if (Position.SECONDARY.equals(position)) { @@ -243,7 +313,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer { private void roundRobinAssignmentImpl(FavoredNodeAssignmentHelper assignmentHelper, Map> assignmentMap, - List regions, List servers) throws IOException { + List regions, List servers) { Map primaryRSMap = new HashMap(); // figure the primary RSs assignmentHelper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions); @@ -274,12 +344,4 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer { globalFavoredNodesAssignmentPlan.updateFavoredNodesMap(region, favoredNodesForRegion); } } - - void noteFavoredNodes(final Map favoredNodesMap) { - for (Map.Entry entry : favoredNodesMap.entrySet()) { - // the META should already have favorednode ServerName objects without startcode - globalFavoredNodesAssignmentPlan.updateFavoredNodesMap(entry.getKey(), - Arrays.asList(entry.getValue())); - } - } } \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodesPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodesPlan.java new file mode 100644 index 00000000000..a4303905299 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodesPlan.java @@ -0,0 +1,155 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.balancer; + +import java.util.List; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.jboss.netty.util.internal.ConcurrentHashMap; + +/** + * This class contains the mapping information between each region and + * its favored region server list. Used by {@link FavoredNodeLoadBalancer} set + * of classes and from unit tests (hence the class is public) + * + * All the access to this class is thread-safe. + */ +@InterfaceAudience.Private +public class FavoredNodesPlan { + protected static final Log LOG = LogFactory.getLog( + FavoredNodesPlan.class.getName()); + + /** the map between each region and its favored region server list */ + private Map> favoredNodesMap; + + public static enum Position { + PRIMARY, + SECONDARY, + TERTIARY; + }; + + public FavoredNodesPlan() { + favoredNodesMap = new ConcurrentHashMap>(); + } + + /** + * Add an assignment to the plan + * @param region + * @param servers + */ + public synchronized void updateFavoredNodesMap(HRegionInfo region, + List servers) { + if (region == null || servers == null || servers.size() ==0) + return; + this.favoredNodesMap.put(region, servers); + } + + /** + * @param region + * @return the list of favored region server for this region based on the plan + */ + public synchronized List getFavoredNodes(HRegionInfo region) { + return favoredNodesMap.get(region); + } + + /** + * Return the position of the server in the favoredNodes list. Assumes the + * favoredNodes list is of size 3. + * @param favoredNodes + * @param server + * @return position + */ + public static Position getFavoredServerPosition( + List favoredNodes, ServerName server) { + if (favoredNodes == null || server == null || + favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) { + return null; + } + for (Position p : Position.values()) { + if (favoredNodes.get(p.ordinal()).equals(server)) { + return p; + } + } + return null; + } + + /** + * @return the mapping between each region to its favored region server list + */ + public synchronized Map> getAssignmentMap() { + return this.favoredNodesMap; + } + + /** + * Add an assignment to the plan + * @param region + * @param servers + */ + public synchronized void updateAssignmentPlan(HRegionInfo region, + List servers) { + if (region == null || servers == null || servers.size() ==0) + return; + this.favoredNodesMap.put(region, servers); + LOG.info("Update the assignment plan for region " + + region.getRegionNameAsString() + " ; favored nodes " + + FavoredNodeAssignmentHelper.getFavoredNodesAsString(servers)); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null) { + return false; + } + if (getClass() != o.getClass()) { + return false; + } + // To compare the map from objec o is identical to current assignment map. 
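+    // Two plans are equal only when they hold exactly the same
+    // region-to-favored-nodes entries, so compare sizes first and then each entry.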
+ Map> comparedMap= + ((FavoredNodesPlan)o).getAssignmentMap(); + + // compare the size + if (comparedMap.size() != this.favoredNodesMap.size()) + return false; + + // compare each element in the assignment map + for (Map.Entry> entry : + comparedMap.entrySet()) { + List serverList = this.favoredNodesMap.get(entry.getKey()); + if (serverList == null && entry.getValue() != null) { + return false; + } else if (serverList != null && !serverList.equals(entry.getValue())) { + return false; + } + } + return true; + } + + @Override + public int hashCode() { + return favoredNodesMap.hashCode(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index b44fbb04656..2e430b3f4e5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -145,6 +145,8 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ActionResult; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; @@ -4341,4 +4343,18 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa } return result; } + + @Override + public UpdateFavoredNodesResponse updateFavoredNodes(RpcController controller, + UpdateFavoredNodesRequest request) throws ServiceException { + List openInfoList = request.getUpdateInfoList(); + UpdateFavoredNodesResponse.Builder respBuilder = UpdateFavoredNodesResponse.newBuilder(); + for (UpdateFavoredNodesRequest.RegionUpdateInfo regionUpdateInfo : openInfoList) { + HRegionInfo hri = HRegionInfo.convert(regionUpdateInfo.getRegion()); + updateRegionFavoredNodesMapping(hri.getEncodedName(), + regionUpdateInfo.getFavoredNodesList()); + } + respBuilder.setResponse(openInfoList.size()); + return respBuilder.build(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java new file mode 100644 index 00000000000..4c0cc464756 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java @@ -0,0 +1,169 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.BlockLocation; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; + +/** + * Thread that walks over the filesystem, and computes the mappings + * BestHost> and Map> + * + */ +@InterfaceAudience.Private +class FSRegionScanner implements Runnable { + static private final Log LOG = LogFactory.getLog(FSRegionScanner.class); + + private Path regionPath; + + /** + * The file system used + */ + private FileSystem fs; + + /** + * Maps each region to the RS with highest locality for that region. + */ + private Map regionToBestLocalityRSMapping; + + /** + * Maps region encoded names to maps of hostnames to fractional locality of + * that region on that host. + */ + private Map> regionDegreeLocalityMapping; + + FSRegionScanner(FileSystem fs, Path regionPath, + Map regionToBestLocalityRSMapping, + Map> regionDegreeLocalityMapping) { + this.fs = fs; + this.regionPath = regionPath; + this.regionToBestLocalityRSMapping = regionToBestLocalityRSMapping; + this.regionDegreeLocalityMapping = regionDegreeLocalityMapping; + } + + @Override + public void run() { + try { + // empty the map for each region + Map blockCountMap = new HashMap(); + + //get table name + String tableName = regionPath.getParent().getName(); + int totalBlkCount = 0; + + // ignore null + FileStatus[] cfList = fs.listStatus(regionPath); + if (null == cfList) { + return; + } + + // for each cf, get all the blocks information + for (FileStatus cfStatus : cfList) { + if (!cfStatus.isDir()) { + // skip because this is not a CF directory + continue; + } + if (cfStatus.getPath().getName().startsWith(".")) { + continue; + } + FileStatus[] storeFileLists = fs.listStatus(cfStatus.getPath()); + if (null == storeFileLists) { + continue; + } + + for (FileStatus storeFile : storeFileLists) { + BlockLocation[] blkLocations = + fs.getFileBlockLocations(storeFile, 0, storeFile.getLen()); + if (null == blkLocations) { + continue; + } + + totalBlkCount += blkLocations.length; + for(BlockLocation blk: blkLocations) { + for (String host: blk.getHosts()) { + AtomicInteger count = blockCountMap.get(host); + if (count == null) { + count = new AtomicInteger(0); + blockCountMap.put(host, count); + } + count.incrementAndGet(); + } + } + } + } + + if (regionToBestLocalityRSMapping != null) { + int largestBlkCount = 0; + String hostToRun = null; + for (Map.Entry entry : blockCountMap.entrySet()) { + String host = entry.getKey(); + + int tmp = entry.getValue().get(); + if (tmp > largestBlkCount) { + largestBlkCount = tmp; + hostToRun = host; + } + } + + // empty regions could make this null + if (null == hostToRun) { + return; + } + + if (hostToRun.endsWith(".")) { + hostToRun = hostToRun.substring(0, hostToRun.length()-1); + } + String name = 
tableName + ":" + regionPath.getName(); + synchronized (regionToBestLocalityRSMapping) { + regionToBestLocalityRSMapping.put(name, hostToRun); + } + } + + if (regionDegreeLocalityMapping != null && totalBlkCount > 0) { + Map hostLocalityMap = new HashMap(); + for (Map.Entry entry : blockCountMap.entrySet()) { + String host = entry.getKey(); + if (host.endsWith(".")) { + host = host.substring(0, host.length() - 1); + } + // Locality is fraction of blocks local to this host. + float locality = ((float)entry.getValue().get()) / totalBlkCount; + hostLocalityMap.put(host, locality); + } + // Put the locality map into the result map, keyed by the encoded name + // of the region. + regionDegreeLocalityMapping.put(regionPath.getName(), hostLocalityMap); + } + } catch (IOException e) { + LOG.warn("Problem scanning file system", e); + } catch (RuntimeException e) { + LOG.warn("Problem scanning file system", e); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index 343a83c44ae..0fd9df7366a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -35,6 +35,10 @@ import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; import java.util.regex.Pattern; import org.apache.commons.logging.Log; @@ -61,6 +65,7 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.RegionPlacementMaintainer; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.FSProtos; import org.apache.hadoop.hbase.regionserver.HRegion; @@ -87,6 +92,8 @@ public abstract class FSUtils { /** Full access permissions (starting point for a umask) */ private static final String FULL_RWX_PERMISSIONS = "777"; + private static final String THREAD_POOLSIZE = "hbase.client.localityCheck.threadPoolSize"; + private static final int DEFAULT_THREAD_POOLSIZE = 2; /** Set to true on Windows platforms */ public static final boolean WINDOWS = System.getProperty("os.name").startsWith("Windows"); @@ -1698,4 +1705,181 @@ public abstract class FSUtils { fs.setTimes(src, EnvironmentEdgeManager.currentTimeMillis(), -1); return fs.rename(src, dest); } + + /** + * This function is to scan the root path of the file system to get the + * degree of locality for each region on each of the servers having at least + * one block of that region. 
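+   * The locality of a region on a host is the fraction of the region's HFile
+   * blocks that have a replica on that host, so 1.0 means the region's data is
+   * entirely local to that host.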
+ * This is used by the tool {@link RegionPlacementMaintainer} + * + * @param conf + * the configuration to use + * @return the mapping from region encoded name to a map of server names to + * locality fraction + * @throws IOException + * in case of file system errors or interrupts + */ + public static Map> getRegionDegreeLocalityMappingFromFS( + final Configuration conf) throws IOException { + return getRegionDegreeLocalityMappingFromFS( + conf, null, + conf.getInt(THREAD_POOLSIZE, DEFAULT_THREAD_POOLSIZE)); + + } + + /** + * This function is to scan the root path of the file system to get the + * degree of locality for each region on each of the servers having at least + * one block of that region. + * + * @param conf + * the configuration to use + * @param desiredTable + * the table you wish to scan locality for + * @param threadPoolSize + * the thread pool size to use + * @return the mapping from region encoded name to a map of server names to + * locality fraction + * @throws IOException + * in case of file system errors or interrupts + */ + public static Map> getRegionDegreeLocalityMappingFromFS( + final Configuration conf, final String desiredTable, int threadPoolSize) + throws IOException { + Map> regionDegreeLocalityMapping = + new ConcurrentHashMap>(); + getRegionLocalityMappingFromFS(conf, desiredTable, threadPoolSize, null, + regionDegreeLocalityMapping); + return regionDegreeLocalityMapping; + } + + /** + * This function is to scan the root path of the file system to get either the + * mapping between the region name and its best locality region server or the + * degree of locality of each region on each of the servers having at least + * one block of that region. The output map parameters are both optional. + * + * @param conf + * the configuration to use + * @param desiredTable + * the table you wish to scan locality for + * @param threadPoolSize + * the thread pool size to use + * @param regionToBestLocalityRSMapping + * the map into which to put the best locality mapping or null + * @param regionDegreeLocalityMapping + * the map into which to put the locality degree mapping or null, + * must be a thread-safe implementation + * @throws IOException + * in case of file system errors or interrupts + */ + private static void getRegionLocalityMappingFromFS( + final Configuration conf, final String desiredTable, + int threadPoolSize, + Map regionToBestLocalityRSMapping, + Map> regionDegreeLocalityMapping) + throws IOException { + FileSystem fs = FileSystem.get(conf); + Path rootPath = FSUtils.getRootDir(conf); + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + Path queryPath; + // The table files are in ${hbase.rootdir}/data///* + if (null == desiredTable) { + queryPath = new Path(new Path(rootPath, HConstants.BASE_NAMESPACE_DIR), "/*/*/"); + } else { + queryPath = new Path(FSUtils.getTableDir(rootPath, TableName.valueOf(desiredTable)), "/*/"); + } + + // reject all paths that are not appropriate + PathFilter pathFilter = new PathFilter() { + @Override + public boolean accept(Path path) { + // this is the region name; it may get some noise data + if (null == path) { + return false; + } + + // no parent? + Path parent = path.getParent(); + if (null == parent) { + return false; + } + + // not part of a table? 
+ if (!parent.getName().equals(TableName.META_TABLE_NAME.getQualifierAsString())) { + return false; + } + + String regionName = path.getName(); + if (null == regionName) { + return false; + } + + if (!regionName.toLowerCase().matches("[0-9a-f]+")) { + return false; + } + return true; + } + }; + + FileStatus[] statusList = fs.globStatus(queryPath, pathFilter); + + if (null == statusList) { + return; + } else { + LOG.debug("Query Path: " + queryPath + " ; # list of files: " + + statusList.length); + } + + // lower the number of threads in case we have very few expected regions + threadPoolSize = Math.min(threadPoolSize, statusList.length); + + // run in multiple threads + ThreadPoolExecutor tpe = new ThreadPoolExecutor(threadPoolSize, + threadPoolSize, 60, TimeUnit.SECONDS, + new ArrayBlockingQueue(statusList.length)); + try { + // ignore all file status items that are not of interest + for (FileStatus regionStatus : statusList) { + if (null == regionStatus) { + continue; + } + + if (!regionStatus.isDir()) { + continue; + } + + Path regionPath = regionStatus.getPath(); + if (null == regionPath) { + continue; + } + + tpe.execute(new FSRegionScanner(fs, regionPath, + regionToBestLocalityRSMapping, regionDegreeLocalityMapping)); + } + } finally { + tpe.shutdown(); + int threadWakeFrequency = conf.getInt(HConstants.THREAD_WAKE_FREQUENCY, + 60 * 1000); + try { + // here we wait until TPE terminates, which is either naturally or by + // exceptions in the execution of the threads + while (!tpe.awaitTermination(threadWakeFrequency, + TimeUnit.MILLISECONDS)) { + // printing out rough estimate, so as to not introduce + // AtomicInteger + LOG.info("Locality checking is underway: { Scanned Regions : " + + tpe.getCompletedTaskCount() + "/" + + tpe.getTaskCount() + " }"); + } + } catch (InterruptedException e) { + throw new IOException(e); + } + } + + long overhead = EnvironmentEdgeManager.currentTimeMillis() - startTime; + String overheadMsg = "Scan DFS for locality info takes " + overhead + " ms"; + + LOG.info(overheadMsg); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java new file mode 100644 index 00000000000..e39e9f65427 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java @@ -0,0 +1,516 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.util; + +import java.util.Arrays; +import java.util.Deque; +import java.util.LinkedList; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Computes the optimal (minimal cost) assignment of jobs to workers (or other + * analogous) concepts given a cost matrix of each pair of job and worker, using + * the algorithm by James Munkres in "Algorithms for the Assignment and + * Transportation Problems", with additional optimizations as described by Jin + * Kue Wong in "A New Implementation of an Algorithm for the Optimal Assignment + * Problem: An Improved Version of Munkres' Algorithm". The algorithm runs in + * O(n^3) time and need O(n^2) auxiliary space where n is the number of jobs or + * workers, whichever is greater. + */ +@InterfaceAudience.Private +public class MunkresAssignment { + + // The original algorithm by Munkres uses the terms STAR and PRIME to denote + // different states of zero values in the cost matrix. These values are + // represented as byte constants instead of enums to save space in the mask + // matrix by a factor of 4n^2 where n is the size of the problem. + private static final byte NONE = 0; + private static final byte STAR = 1; + private static final byte PRIME = 2; + + // The algorithm requires that the number of column is at least as great as + // the number of rows. If that is not the case, then the cost matrix should + // be transposed before computation, and the solution matrix transposed before + // returning to the caller. + private final boolean transposed; + + // The number of rows of internal matrices. + private final int rows; + + // The number of columns of internal matrices. + private final int cols; + + // The cost matrix, the cost of assigning each row index to column index. + private float[][] cost; + + // Mask of zero cost assignment states. + private byte[][] mask; + + // Covering some rows of the cost matrix. + private boolean[] rowsCovered; + + // Covering some columns of the cost matrix. + private boolean[] colsCovered; + + // The alternating path between starred zeroes and primed zeroes + private Deque> path; + + // The solution, marking which rows should be assigned to which columns. The + // positions of elements in this array correspond to the rows of the cost + // matrix, and the value of each element correspond to the columns of the cost + // matrix, i.e. assignments[i] = j indicates that row i should be assigned to + // column j. + private int[] assignments; + + // Improvements described by Jin Kue Wong cache the least value in each row, + // as well as the column index of the least value in each row, and the pending + // adjustments to each row and each column. + private float[] leastInRow; + private int[] leastInRowIndex; + private float[] rowAdjust; + private float[] colAdjust; + + /** + * Construct a new problem instance with the specified cost matrix. The cost + * matrix must be rectangular, though not necessarily square. If one dimension + * is greater than the other, some elements in the greater dimension will not + * be assigned. The input cost matrix will not be modified. + * @param costMatrix + */ + public MunkresAssignment(float[][] costMatrix) { + // The algorithm assumes that the number of columns is at least as great as + // the number of rows. If this is not the case of the input matrix, then + // all internal structures must be transposed relative to the input. 
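+    // For example, a 5x3 cost matrix (5 rows, 3 columns) is solved internally
+    // as its 3x5 transpose, and solve() maps the assignments back to the
+    // original orientation.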
+ this.transposed = costMatrix.length > costMatrix[0].length; + if (this.transposed) { + this.rows = costMatrix[0].length; + this.cols = costMatrix.length; + } else { + this.rows = costMatrix.length; + this.cols = costMatrix[0].length; + } + + cost = new float[rows][cols]; + mask = new byte[rows][cols]; + rowsCovered = new boolean[rows]; + colsCovered = new boolean[cols]; + path = new LinkedList>(); + + leastInRow = new float[rows]; + leastInRowIndex = new int[rows]; + rowAdjust = new float[rows]; + colAdjust = new float[cols]; + + assignments = null; + + // Copy cost matrix. + if (transposed) { + for (int r = 0; r < rows; r++) { + for (int c = 0; c < cols; c++) { + cost[r][c] = costMatrix[c][r]; + } + } + } else { + for (int r = 0; r < rows; r++) { + for (int c = 0; c < cols; c++) { + cost[r][c] = costMatrix[r][c]; + } + } + } + + // Costs must be finite otherwise the matrix can get into a bad state where + // no progress can be made. If your use case depends on a distinction + // between costs of MAX_VALUE and POSITIVE_INFINITY, you're doing it wrong. + for (int r = 0; r < rows; r++) { + for (int c = 0; c < cols; c++) { + if (cost[r][c] == Float.POSITIVE_INFINITY) { + cost[r][c] = Float.MAX_VALUE; + } + } + } + } + + /** + * Get the optimal assignments. The returned array will have the same number + * of elements as the number of elements as the number of rows in the input + * cost matrix. Each element will indicate which column should be assigned to + * that row or -1 if no column should be assigned, i.e. if result[i] = j then + * row i should be assigned to column j. Subsequent invocations of this method + * will simply return the same object without additional computation. + * @return an array with the optimal assignments + */ + public int[] solve() { + // If this assignment problem has already been solved, return the known + // solution + if (assignments != null) { + return assignments; + } + + preliminaries(); + + // Find the optimal assignments. + while (!testIsDone()) { + while (!stepOne()) { + stepThree(); + } + stepTwo(); + } + + // Extract the assignments from the mask matrix. + if (transposed) { + assignments = new int[cols]; + outer: + for (int c = 0; c < cols; c++) { + for (int r = 0; r < rows; r++) { + if (mask[r][c] == STAR) { + assignments[c] = r; + continue outer; + } + } + // There is no assignment for this row of the input/output. + assignments[c] = -1; + } + } else { + assignments = new int[rows]; + outer: + for (int r = 0; r < rows; r++) { + for (int c = 0; c < cols; c++) { + if (mask[r][c] == STAR) { + assignments[r] = c; + continue outer; + } + } + } + } + + // Once the solution has been computed, there is no need to keep any of the + // other internal structures. Clear all unnecessary internal references so + // the garbage collector may reclaim that memory. + cost = null; + mask = null; + rowsCovered = null; + colsCovered = null; + path = null; + leastInRow = null; + leastInRowIndex = null; + rowAdjust = null; + colAdjust = null; + + return assignments; + } + + /** + * Corresponds to the "preliminaries" step of the original algorithm. + * Guarantees that the matrix is an equivalent non-negative matrix with at + * least one zero in each row. + */ + private void preliminaries() { + for (int r = 0; r < rows; r++) { + // Find the minimum cost of each row. + float min = Float.POSITIVE_INFINITY; + for (int c = 0; c < cols; c++) { + min = Math.min(min, cost[r][c]); + } + + // Subtract that minimum cost from each element in the row. 
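+      // For example, a row with costs [4, 2, 7] has minimum 2 and becomes
+      // [2, 0, 5]; the new zero is starred only if its row and column do not
+      // already contain a starred zero.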
+ for (int c = 0; c < cols; c++) { + cost[r][c] -= min; + + // If the element is now zero and there are no zeroes in the same row + // or column which are already starred, then star this one. There + // must be at least one zero because of subtracting the min cost. + if (cost[r][c] == 0 && !rowsCovered[r] && !colsCovered[c]) { + mask[r][c] = STAR; + // Cover this row and column so that no other zeroes in them can be + // starred. + rowsCovered[r] = true; + colsCovered[c] = true; + } + } + } + + // Clear the covered rows and columns. + Arrays.fill(rowsCovered, false); + Arrays.fill(colsCovered, false); + } + + /** + * Test whether the algorithm is done, i.e. we have the optimal assignment. + * This occurs when there is exactly one starred zero in each row. + * @return true if the algorithm is done + */ + private boolean testIsDone() { + // Cover all columns containing a starred zero. There can be at most one + // starred zero per column. Therefore, a covered column has an optimal + // assignment. + for (int r = 0; r < rows; r++) { + for (int c = 0; c < cols; c++) { + if (mask[r][c] == STAR) { + colsCovered[c] = true; + } + } + } + + // Count the total number of covered columns. + int coveredCols = 0; + for (int c = 0; c < cols; c++) { + coveredCols += colsCovered[c] ? 1 : 0; + } + + // Apply an row and column adjustments that are pending. + for (int r = 0; r < rows; r++) { + for (int c = 0; c < cols; c++) { + cost[r][c] += rowAdjust[r]; + cost[r][c] += colAdjust[c]; + } + } + + // Clear the pending row and column adjustments. + Arrays.fill(rowAdjust, 0); + Arrays.fill(colAdjust, 0); + + // The covers on columns and rows may have been reset, recompute the least + // value for each row. + for (int r = 0; r < rows; r++) { + leastInRow[r] = Float.POSITIVE_INFINITY; + for (int c = 0; c < cols; c++) { + if (!rowsCovered[r] && !colsCovered[c] && cost[r][c] < leastInRow[r]) { + leastInRow[r] = cost[r][c]; + leastInRowIndex[r] = c; + } + } + } + + // If all columns are covered, then we are done. Since there may be more + // columns than rows, we are also done if the number of covered columns is + // at least as great as the number of rows. + return (coveredCols == cols || coveredCols >= rows); + } + + /** + * Corresponds to step 1 of the original algorithm. + * @return false if all zeroes are covered + */ + private boolean stepOne() { + while (true) { + Pair zero = findUncoveredZero(); + if (zero == null) { + // No uncovered zeroes, need to manipulate the cost matrix in step + // three. + return false; + } else { + // Prime the uncovered zero and find a starred zero in the same row. + mask[zero.getFirst()][zero.getSecond()] = PRIME; + Pair star = starInRow(zero.getFirst()); + if (star != null) { + // Cover the row with both the newly primed zero and the starred zero. + // Since this is the only place where zeroes are primed, and we cover + // it here, and rows are only uncovered when primes are erased, then + // there can be at most one primed uncovered zero. + rowsCovered[star.getFirst()] = true; + colsCovered[star.getSecond()] = false; + updateMin(star.getFirst(), star.getSecond()); + } else { + // Will go to step two after, where a path will be constructed, + // starting from the uncovered primed zero (there is only one). Since + // we have already found it, save it as the first node in the path. + path.clear(); + path.offerLast(new Pair(zero.getFirst(), + zero.getSecond())); + return true; + } + } + } + } + + /** + * Corresponds to step 2 of the original algorithm. 
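+   * Starting from the uncovered primed zero found in step 1, an alternating
+   * path of starred and primed zeroes is built and the marks along it are
+   * flipped, so the number of starred zeroes (the partial assignment) grows
+   * by one.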
+ */ + private void stepTwo() { + // Construct a path of alternating starred zeroes and primed zeroes, where + // each starred zero is in the same column as the previous primed zero, and + // each primed zero is in the same row as the previous starred zero. The + // path will always end in a primed zero. + while (true) { + Pair star = starInCol(path.getLast().getSecond()); + if (star != null) { + path.offerLast(star); + } else { + break; + } + Pair prime = primeInRow(path.getLast().getFirst()); + path.offerLast(prime); + } + + // Augment path - unmask all starred zeroes and star all primed zeroes. All + // nodes in the path will be either starred or primed zeroes. The set of + // starred zeroes is independent and now one larger than before. + for (Pair p : path) { + if (mask[p.getFirst()][p.getSecond()] == STAR) { + mask[p.getFirst()][p.getSecond()] = NONE; + } else { + mask[p.getFirst()][p.getSecond()] = STAR; + } + } + + // Clear all covers from rows and columns. + Arrays.fill(rowsCovered, false); + Arrays.fill(colsCovered, false); + + // Remove the prime mask from all primed zeroes. + for (int r = 0; r < rows; r++) { + for (int c = 0; c < cols; c++) { + if (mask[r][c] == PRIME) { + mask[r][c] = NONE; + } + } + } + } + + /** + * Corresponds to step 3 of the original algorithm. + */ + private void stepThree() { + // Find the minimum uncovered cost. + float min = leastInRow[0]; + for (int r = 1; r < rows; r++) { + if (leastInRow[r] < min) { + min = leastInRow[r]; + } + } + + // Add the minimum cost to each of the costs in a covered row, or subtract + // the minimum cost from each of the costs in an uncovered column. As an + // optimization, do not actually modify the cost matrix yet, but track the + // adjustments that need to be made to each row and column. + for (int r = 0; r < rows; r++) { + if (rowsCovered[r]) { + rowAdjust[r] += min; + } + } + for (int c = 0; c < cols; c++) { + if (!colsCovered[c]) { + colAdjust[c] -= min; + } + } + + // Since the cost matrix is not being updated yet, the minimum uncovered + // cost per row must be updated. + for (int r = 0; r < rows; r++) { + if (!colsCovered[leastInRowIndex[r]]) { + // The least value in this row was in an uncovered column, meaning that + // it would have had the minimum value subtracted from it, and therefore + // will still be the minimum value in that row. + leastInRow[r] -= min; + } else { + // The least value in this row was in a covered column and would not + // have had the minimum value subtracted from it, so the minimum value + // could be some in another column. + for (int c = 0; c < cols; c++) { + if (cost[r][c] + colAdjust[c] + rowAdjust[r] < leastInRow[r]) { + leastInRow[r] = cost[r][c] + colAdjust[c] + rowAdjust[r]; + leastInRowIndex[r] = c; + } + } + } + } + } + + /** + * Find a zero cost assignment which is not covered. If there are no zero cost + * assignments which are uncovered, then null will be returned. + * @return pair of row and column indices of an uncovered zero or null + */ + private Pair findUncoveredZero() { + for (int r = 0; r < rows; r++) { + if (leastInRow[r] == 0) { + return new Pair(r, leastInRowIndex[r]); + } + } + return null; + } + + /** + * A specified row has become covered, and a specified column has become + * uncovered. The least value per row may need to be updated. 
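+   * Only the newly uncovered column has to be rescanned, and only for rows
+   * that remain uncovered, so this update costs O(rows) rather than a full
+   * O(rows * cols) recomputation.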
+ * @param row the index of the row which was just covered + * @param col the index of the column which was just uncovered + */ + private void updateMin(int row, int col) { + // If the row is covered we want to ignore it as far as least values go. + leastInRow[row] = Float.POSITIVE_INFINITY; + + for (int r = 0; r < rows; r++) { + // Since the column has only just been uncovered, it could not have any + // pending adjustments. Only covered rows can have pending adjustments + // and covered costs do not count toward row minimums. Therefore, we do + // not need to consider rowAdjust[r] or colAdjust[col]. + if (!rowsCovered[r] && cost[r][col] < leastInRow[r]) { + leastInRow[r] = cost[r][col]; + leastInRowIndex[r] = col; + } + } + } + + /** + * Find a starred zero in a specified row. If there are no starred zeroes in + * the specified row, then null will be returned. + * @param r the index of the row to be searched + * @return pair of row and column indices of starred zero or null + */ + private Pair starInRow(int r) { + for (int c = 0; c < cols; c++) { + if (mask[r][c] == STAR) { + return new Pair(r, c); + } + } + return null; + } + + /** + * Find a starred zero in the specified column. If there are no starred zeroes + * in the specified row, then null will be returned. + * @param c the index of the column to be searched + * @return pair of row and column indices of starred zero or null + */ + private Pair starInCol(int c) { + for (int r = 0; r < rows; r++) { + if (mask[r][c] == STAR) { + return new Pair(r, c); + } + } + return null; + } + + /** + * Find a primed zero in the specified row. If there are no primed zeroes in + * the specified row, then null will be returned. + * @param r the index of the row to be searched + * @return pair of row and column indices of primed zero or null + */ + private Pair primeInRow(int r) { + for (int c = 0; c < cols; c++) { + if (mask[r][c] == PRIME) { + return new Pair(r, c); + } + } + return null; + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index 8c9e0ba8263..305b94c5409 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -71,6 +71,8 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse; @@ -557,4 +559,10 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { // TODO Auto-generated method stub return null; } + + @Override + public UpdateFavoredNodesResponse updateFavoredNodes(RpcController controller, + UpdateFavoredNodesRequest request) throws ServiceException { + return null; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java index e9524f414b6..f8260876bd3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java @@ -25,16 +25,19 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; import java.net.InetSocketAddress; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Random; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -53,15 +56,19 @@ import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper; import org.apache.hadoop.hbase.master.balancer.FavoredNodeLoadBalancer; -import org.apache.hadoop.hbase.master.balancer.FavoredNodes.Position; +import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan; +import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan.Position; import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; +import static org.junit.Assert.fail; + @Category(MediumTests.class) public class TestRegionPlacement { @@ -69,7 +76,9 @@ public class TestRegionPlacement { private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private final static int SLAVES = 10; private static HBaseAdmin admin; + private static RegionPlacementMaintainer rp; private static Position[] positions = Position.values(); + private int lastRegionOnPrimaryRSCount = 0; private int REGION_NUM = 10; private Map favoredNodesAssignmentPlan = new HashMap(); @@ -86,6 +95,7 @@ public class TestRegionPlacement { conf.setBoolean("hbase.tests.use.shortcircuit.reads", false); TEST_UTIL.startMiniCluster(SLAVES); admin = new HBaseAdmin(conf); + rp = new RegionPlacementMaintainer(conf); } @AfterClass @@ -198,19 +208,255 @@ public class TestRegionPlacement { !favoredNodesNow.contains(favoredNodesAfter.get(TERTIARY))); } - @Test(timeout = 180000) + @Test public void testRegionPlacement() throws Exception { + byte[] table = Bytes.toBytes("testRegionAssignment"); // Create a table with REGION_NUM regions. 
+
+  private void killRandomServerAndVerifyAssignment()
+      throws IOException, InterruptedException {
+    ClusterStatus oldStatus = TEST_UTIL.getHBaseCluster().getClusterStatus();
+    ServerName servers[] = oldStatus.getServers().toArray(new ServerName[10]);
+    ServerName serverToKill = null;
+    int killIndex = 0;
+    Random random = new Random(System.currentTimeMillis());
+    ServerName metaServer = TEST_UTIL.getHBaseCluster().getServerHoldingMeta();
+    LOG.debug("Server holding meta " + metaServer);
+    do {
+      // kill a random non-meta server carrying at least one region
+      killIndex = random.nextInt(servers.length);
+      serverToKill = TEST_UTIL.getHBaseCluster().getRegionServer(killIndex).getServerName();
+    } while (ServerName.isSameHostnameAndPort(metaServer, serverToKill) ||
+        TEST_UTIL.getHBaseCluster().getRegionServer(killIndex).getNumberOfOnlineRegions() == 0);
+    LOG.debug("Stopping RS " + serverToKill);
+    Map<HRegionInfo, Pair<ServerName, ServerName>> regionsToVerify =
+        new HashMap<HRegionInfo, Pair<ServerName, ServerName>>();
+    // mark the regions to track
+    for (Map.Entry<HRegionInfo, ServerName[]> entry : favoredNodesAssignmentPlan.entrySet()) {
+      ServerName s = entry.getValue()[0];
+      if (ServerName.isSameHostnameAndPort(s, serverToKill)) {
+        regionsToVerify.put(entry.getKey(), new Pair<ServerName, ServerName>(
+            entry.getValue()[1], entry.getValue()[2]));
+        LOG.debug("Adding " + entry.getKey() + " with secondary/tertiary " +
+            entry.getValue()[1] + " " + entry.getValue()[2]);
+      }
+    }
+    int orig =
+        TEST_UTIL.getHBaseCluster().getMaster().assignmentManager.getNumRegionsOpened();
+    TEST_UTIL.getHBaseCluster().stopRegionServer(serverToKill);
+    TEST_UTIL.getHBaseCluster().waitForRegionServerToStop(serverToKill, 60000);
+    int curr = TEST_UTIL.getHBaseCluster().getMaster().assignmentManager.getNumRegionsOpened();
+    while (curr - orig < regionsToVerify.size()) {
+      LOG.debug("Waiting for " + regionsToVerify.size() + " regions to come online. " +
+          "Current #regions " + curr + " Original #regions " + orig);
+      Thread.sleep(200);
+      curr = TEST_UTIL.getHBaseCluster().getMaster().assignmentManager.getNumRegionsOpened();
+    }
+    // now verify
+    for (Map.Entry<HRegionInfo, Pair<ServerName, ServerName>> entry : regionsToVerify.entrySet()) {
+      ServerName newDestination = TEST_UTIL.getHBaseCluster().getMaster()
+          .getAssignmentManager().getRegionStates().getRegionServerOfRegion(entry.getKey());
+      Pair<ServerName, ServerName> secondaryTertiaryServers = entry.getValue();
+      LOG.debug("New destination for region " + entry.getKey().getEncodedName() +
+          " " + newDestination + ". Secondary/Tertiary are " + secondaryTertiaryServers.getFirst() +
+          "/" + secondaryTertiaryServers.getSecond());
+      if (!(ServerName.isSameHostnameAndPort(newDestination, secondaryTertiaryServers.getFirst()) ||
+          ServerName.isSameHostnameAndPort(newDestination, secondaryTertiaryServers.getSecond()))) {
+        fail("Region " + entry.getKey() + " not present on any of the expected servers");
+      }
+    }
+    // start (reinstate) a region server since we killed one before
+    TEST_UTIL.getHBaseCluster().startRegionServer();
+  }
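ServerName.isSameHostnameAndPort is used throughout this method instead of ServerName.equals: the favored-nodes bookkeeping identifies a server by host and port only (the start code is not tracked), which is presumably also why removeMatchingServers below takes a "serverWithoutStartCode". A hypothetical helper, not part of the patch, capturing the membership check done in the verify loop above:

  // True when the region landed on one of its expected favored nodes.
  private static boolean onExpectedFavoredNode(ServerName actual,
      Pair<ServerName, ServerName> secondaryTertiary) {
    return ServerName.isSameHostnameAndPort(actual, secondaryTertiary.getFirst())
        || ServerName.isSameHostnameAndPort(actual, secondaryTertiary.getSecond());
  }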
+
+  /**
+   * Tests the correctness of the RegionPlacementMaintainer.RandomizedMatrix helper.
+   */
+  @Test
+  public void testRandomizedMatrix() {
+    int rows = 100;
+    int cols = 100;
+    float[][] matrix = new float[rows][cols];
+    Random random = new Random();
+    for (int i = 0; i < rows; i++) {
+      for (int j = 0; j < cols; j++) {
+        matrix[i][j] = random.nextFloat();
+      }
+    }
+
+    // Test that inverting a transformed matrix gives the original matrix.
+    RegionPlacementMaintainer.RandomizedMatrix rm =
+        new RegionPlacementMaintainer.RandomizedMatrix(rows, cols);
+    float[][] transformed = rm.transform(matrix);
+    float[][] invertedTransformed = rm.invert(transformed);
+    for (int i = 0; i < rows; i++) {
+      for (int j = 0; j < cols; j++) {
+        if (matrix[i][j] != invertedTransformed[i][j]) {
+          throw new RuntimeException();
+        }
+      }
+    }
+
+    // Test that the indices on a transformed matrix can be inverted to give
+    // the same values on the original matrix.
+    int[] transformedIndices = new int[rows];
+    for (int i = 0; i < rows; i++) {
+      transformedIndices[i] = random.nextInt(cols);
+    }
+    int[] invertedTransformedIndices = rm.invertIndices(transformedIndices);
+    float[] transformedValues = new float[rows];
+    float[] invertedTransformedValues = new float[rows];
+    for (int i = 0; i < rows; i++) {
+      transformedValues[i] = transformed[i][transformedIndices[i]];
+      invertedTransformedValues[i] = matrix[i][invertedTransformedIndices[i]];
+    }
+    Arrays.sort(transformedValues);
+    Arrays.sort(invertedTransformedValues);
+    if (!Arrays.equals(transformedValues, invertedTransformedValues)) {
+      throw new RuntimeException();
+    }
+  }
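RandomizedMatrix itself is defined in RegionPlacementMaintainer and is not shown in this hunk. The two checks above rely on it applying fixed (randomly chosen but reversible) row and column permutations to a matrix, presumably so the assignment solver does not systematically favor low-index rows and columns when costs tie. A conceptual sketch of that contract, where the permutation arrays are assumptions of the sketch rather than the real fields:

  // transform: out[i][j] = in[rowPerm[i]][colPerm[j]]
  // invert applies the inverse permutations, so invert(transform(m)) == m element-wise.
  float[][] out = new float[rows][cols];
  for (int i = 0; i < rows; i++) {
    for (int j = 0; j < cols; j++) {
      out[i][j] = in[rowPerm[i]][colPerm[j]];
    }
  }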
+
+  /**
+   * Shuffle the assignment plan by switching two favored node positions.
+   * @param plan The assignment plan
+   * @param p1 The first switch position
+   * @param p2 The second switch position
+   * @return the shuffled assignment plan
+   */
+  private FavoredNodesPlan shuffleAssignmentPlan(FavoredNodesPlan plan,
+      FavoredNodesPlan.Position p1, FavoredNodesPlan.Position p2) {
+    FavoredNodesPlan shuffledPlan = new FavoredNodesPlan();
+
+    for (Map.Entry<HRegionInfo, List<ServerName>> entry :
+        plan.getAssignmentMap().entrySet()) {
+      HRegionInfo region = entry.getKey();
+
+      // copy the server list from the original plan
+      List<ServerName> shuffledServerList = new ArrayList<ServerName>();
+      shuffledServerList.addAll(entry.getValue());
+
+      // swap the servers at the two positions
+      shuffledServerList.set(p1.ordinal(), entry.getValue().get(p2.ordinal()));
+      shuffledServerList.set(p2.ordinal(), entry.getValue().get(p1.ordinal()));
+
+      // update the plan
+      shuffledPlan.updateAssignmentPlan(region, shuffledServerList);
+    }
+    return shuffledPlan;
+  }
+
+  /**
+   * Verify the region assignment status.
+   * It checks the assignment plan consistency between META and the
+   * region servers.
+   * It also verifies whether the number of region movements and the
+   * number of regions on the primary region server are as expected.
+   *
+   * @param plan the expected assignment plan
+   * @param regionMovementNum the expected number of region movements
+   * @param numRegionsOnPrimaryRS the expected number of regions on the primary region server
+   * @throws InterruptedException
+   * @throws IOException
+   */
+  private void verifyRegionAssignment(FavoredNodesPlan plan,
+      int regionMovementNum, int numRegionsOnPrimaryRS)
+      throws InterruptedException, IOException {
+    // Verify the assignment plan in META is consistent with the expected plan.
+    verifyMETAUpdated(plan);
+
+    // Verify the number of region movements is as expected
+    verifyRegionMovementNum(regionMovementNum);
+
+    // Verify the number of regions assigned to the primary region server
+    // based on the plan is as expected
+    verifyRegionOnPrimaryRS(numRegionsOnPrimaryRS);
+
+    // Verify all the online region servers are updated with the assignment plan
+    verifyRegionServerUpdated(plan);
+  }
+
+  /**
+   * Verify that META has been updated with the latest assignment plan.
+   * @param expectedPlan the plan META is expected to contain
+   * @throws IOException
+   */
+  private void verifyMETAUpdated(FavoredNodesPlan expectedPlan)
+      throws IOException {
+    FavoredNodesPlan planFromMETA = rp.getRegionAssignmentSnapshot().getExistingAssignmentPlan();
+    assertTrue("The assignment plan is NOT consistent with the expected plan ",
+        planFromMETA.equals(expectedPlan));
+  }
+
+  /**
+   * Verify that the number of region movements is as expected.
+   */
+  private void verifyRegionMovementNum(int expected)
+      throws InterruptedException, HBaseIOException {
+    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+    HMaster m = cluster.getMaster();
+    int lastRegionOpenedCount = m.assignmentManager.getNumRegionsOpened();
+    // trigger a balance so the new assignments start to execute
+    m.balance();
+
+    int retry = 10;
+    long sleep = 3000;
+    int attempt = 0;
+    int currentRegionOpened, regionMovement;
+    do {
+      currentRegionOpened = m.assignmentManager.getNumRegionsOpened();
+      regionMovement = currentRegionOpened - lastRegionOpenedCount;
+      LOG.debug("There are " + regionMovement + "/" + expected +
+          " regions moved after " + attempt + " attempts");
+      Thread.sleep((++attempt) * sleep);
+    } while (regionMovement != expected && attempt <= retry);
+
+    // update the lastRegionOpenedCount
+    lastRegionOpenedCount = currentRegionOpened;
+
+    assertEquals("There are only " + regionMovement + " instead of " +
+        expected + " region movements after " + attempt + " attempts",
+        expected, regionMovement);
   }
 
   private List<ServerName> removeMatchingServers(ServerName serverWithoutStartCode,
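The two verifyRegionAssignment calls in testRegionPlacement encode a simple invariant: regions only move when their PRIMARY favored node changes. Swapping SECONDARY with TERTIARY leaves every primary untouched, so zero movements are expected; swapping PRIMARY with SECONDARY changes every region's primary, so all REGION_NUM regions must be reopened elsewhere. In sketch form (REGION_NUM is 10 in this test; the variables are illustrative):

  // expected movements for a swap of positions p1 and p2
  boolean primaryAffected = (p1 == Position.PRIMARY || p2 == Position.PRIMARY);
  int expectedMovements = primaryAffected ? REGION_NUM : 0;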
@@ -240,9 +486,9 @@ public class TestRegionPlacement {
    * @param expectedNum.
    * @throws IOException
    */
-  private void countRegionOnPrimaryRS(int expectedNum)
+  private void verifyRegionOnPrimaryRS(int expectedNum)
       throws IOException {
-    int lastRegionOnPrimaryRSCount = getNumRegionisOnPrimaryRS();
+    lastRegionOnPrimaryRSCount = getNumRegionisOnPrimaryRS();
     assertEquals("Only " + expectedNum + " of user regions running " +
         "on the primary region server", expectedNum , lastRegionOnPrimaryRSCount);
@@ -254,7 +500,7 @@ public class TestRegionPlacement {
    * @param plan
    * @throws IOException
    */
-  private void verifyRegionServerUpdated() throws IOException {
+  private void verifyRegionServerUpdated(FavoredNodesPlan plan) throws IOException {
     // Verify all region servers contain the correct favored nodes information
     MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
     for (int i = 0; i < SLAVES; i++) {
@@ -263,7 +509,7 @@ public class TestRegionPlacement {
           TableName.valueOf("testRegionAssignment"))) {
         InetSocketAddress[] favoredSocketAddress = rs.getFavoredNodesForRegion(
             region.getRegionInfo().getEncodedName());
-        ServerName[] favoredServerList = favoredNodesAssignmentPlan.get(region.getRegionInfo());
+        List<ServerName> favoredServerList = plan.getAssignmentMap().get(region.getRegionInfo());
 
         // All regions are supposed to have favored nodes,
         // except for META and ROOT
@@ -278,12 +524,12 @@ public class TestRegionPlacement {
         } else {
           // For user region, the favored nodes in the region server should be
           // identical to favored nodes in the assignmentPlan
-          assertTrue(favoredSocketAddress.length == favoredServerList.length);
-          assertTrue(favoredServerList.length > 0);
-          for (int j = 0; j < favoredServerList.length; j++) {
+          assertTrue(favoredSocketAddress.length == favoredServerList.size());
+          assertTrue(favoredServerList.size() > 0);
+          for (int j = 0; j < favoredServerList.size(); j++) {
             InetSocketAddress addrFromRS = favoredSocketAddress[j];
             InetSocketAddress addrFromPlan = InetSocketAddress.createUnresolved(
-                favoredServerList[j].getHostname(), favoredServerList[j].getPort());
+                favoredServerList.get(j).getHostname(), favoredServerList.get(j).getPort());
 
             assertNotNull(addrFromRS);
             assertNotNull(addrFromPlan);
@@ -379,9 +625,8 @@ public class TestRegionPlacement {
    * @return
    * @throws IOException
    */
-  private static void createTable(String table, int regionNum)
+  private static void createTable(byte[] tableName, int regionNum)
       throws IOException {
-    byte[] tableName = Bytes.toBytes(table);
     int expectedRegions = regionNum;
     byte[][] splitKeys = new byte[expectedRegions - 1][];
     for (int i = 1; i < expectedRegions; i++) {