HBASE-7932. Introduces Favored Nodes for region files. Adds a balancer, FavoredNodeLoadBalancer, that will honor favored nodes in the process of balancing, but the balance operation is currently a no-op (Devaraj Das)

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1481476 13f79535-47bb-0310-9956-ffa450edef68
Devaraj Das 2013-05-12 06:47:39 +00:00
parent 3185729df1
commit 7a7ab8b8da
21 changed files with 2505 additions and 149 deletions
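
The FavoredNodeLoadBalancer class itself is among the 21 changed files but is not excerpted in the hunks below. For orientation only, here is a minimal sketch of what a no-op balance operation could look like, assuming the LoadBalancer contract of this era (balanceCluster takes the per-server region map and returns the RegionPlan moves to make, where returning null means move nothing). The class name and package imports are illustrative; this is not code from this commit.

import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;

// Illustrative sketch only -- not the FavoredNodeLoadBalancer shipped in this commit.
public class NoOpBalanceSketch extends BaseLoadBalancer {
  @Override
  public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState) {
    // Favored nodes are honored when regions are assigned; rebalancing deliberately
    // produces no RegionPlan moves, hence the no-op.
    return null;
  }
}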

View File

@ -27,7 +27,9 @@ import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServ
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.regex.Pattern;
/**
@ -91,6 +93,7 @@ public class ServerName implements Comparable<ServerName> {
* @see #getVersionedBytes()
*/
private byte [] bytes;
public static final List<ServerName> EMPTY_SERVER_LIST = new ArrayList<ServerName>(0);
public ServerName(final String hostname, final int port, final long startcode) {
this.hostname = hostname;

View File

@ -1388,7 +1388,7 @@ public final class ProtobufUtil {
public static void openRegion(final AdminService.BlockingInterface admin,
final HRegionInfo region) throws IOException {
OpenRegionRequest request =
RequestConverter.buildOpenRegionRequest(region, -1);
RequestConverter.buildOpenRegionRequest(region, -1, null);
try {
admin.openRegion(null, request);
} catch (ServiceException se) {

View File

@ -97,6 +97,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRe
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Triple;
import com.google.protobuf.ByteString;
@ -685,13 +686,14 @@ public final class RequestConverter {
* @return a protocol buffer OpenRegionRequest
*/
public static OpenRegionRequest
buildOpenRegionRequest(final List<Pair<HRegionInfo, Integer>> regionOpenInfos) {
buildOpenRegionRequest(final List<Triple<HRegionInfo, Integer,
List<ServerName>>> regionOpenInfos) {
OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder();
for (Pair<HRegionInfo, Integer> regionOpenInfo: regionOpenInfos) {
for (Triple<HRegionInfo, Integer, List<ServerName>> regionOpenInfo: regionOpenInfos) {
Integer second = regionOpenInfo.getSecond();
int versionOfOfflineNode = second == null ? -1 : second.intValue();
builder.addOpenInfo(buildRegionOpenInfo(
regionOpenInfo.getFirst(), versionOfOfflineNode));
regionOpenInfo.getFirst(), versionOfOfflineNode, regionOpenInfo.getThird()));
}
return builder.build();
}
@ -700,13 +702,14 @@ public final class RequestConverter {
* Create a protocol buffer OpenRegionRequest for a given region
*
* @param region the region to open
* @param versionOfOfflineNode that needs to be present in the offline node
* @param favoredNodes
* @return a protocol buffer OpenRegionRequest
*/
public static OpenRegionRequest buildOpenRegionRequest(
final HRegionInfo region, final int versionOfOfflineNode) {
final HRegionInfo region, final int versionOfOfflineNode, List<ServerName> favoredNodes) {
OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder();
builder.addOpenInfo(buildRegionOpenInfo(region, versionOfOfflineNode));
builder.addOpenInfo(buildRegionOpenInfo(region, versionOfOfflineNode, favoredNodes));
return builder.build();
}
@ -1260,12 +1263,18 @@ public final class RequestConverter {
* Create a RegionOpenInfo based on given region info and version of offline node
*/
private static RegionOpenInfo buildRegionOpenInfo(
final HRegionInfo region, final int versionOfOfflineNode) {
final HRegionInfo region, final int versionOfOfflineNode,
final List<ServerName> favoredNodes) {
RegionOpenInfo.Builder builder = RegionOpenInfo.newBuilder();
builder.setRegion(HRegionInfo.convert(region));
if (versionOfOfflineNode >= 0) {
builder.setVersionOfOfflineNode(versionOfOfflineNode);
}
if (favoredNodes != null) {
for (ServerName server : favoredNodes) {
builder.addFavoredNodes(ProtobufUtil.toServerName(server));
}
}
return builder.build();
}
}
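
For orientation, a usage sketch (not code from this commit) of the widened helper: a caller that wants a region opened on a preferred set of hosts passes the favored ServerNames through buildOpenRegionRequest, while passing null, as the ProtobufUtil.openRegion change above does, keeps the old behaviour. The wrapper class, host names and ports below are made up for illustration.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
import com.google.protobuf.ServiceException;

public class OpenRegionWithFavoredNodesSketch {
  // Asks the region server behind "admin" to open "region", hinting three favored hosts.
  static void openWithFavoredNodes(AdminService.BlockingInterface admin, HRegionInfo region)
      throws ServiceException {
    List<ServerName> favoredNodes = new ArrayList<ServerName>();
    favoredNodes.add(new ServerName("dn1.example.com", 60020, 1368334059000L));
    favoredNodes.add(new ServerName("dn2.example.com", 60020, 1368334059000L));
    favoredNodes.add(new ServerName("dn3.example.com", 60020, 1368334059000L));
    // -1 skips the offline-znode version check, mirroring ProtobufUtil.openRegion above;
    // the favored nodes ride along inside each RegionOpenInfo of the request.
    OpenRegionRequest request =
        RequestConverter.buildOpenRegionRequest(region, -1, favoredNodes);
    admin.openRegion(null, request);
  }
}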

View File

@ -3118,6 +3118,16 @@ public final class AdminProtos {
// optional uint32 versionOfOfflineNode = 2;
boolean hasVersionOfOfflineNode();
int getVersionOfOfflineNode();
// repeated .ServerName favoredNodes = 3;
java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>
getFavoredNodesList();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getFavoredNodes(int index);
int getFavoredNodesCount();
java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
getFavoredNodesOrBuilderList();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getFavoredNodesOrBuilder(
int index);
}
public static final class RegionOpenInfo extends
com.google.protobuf.GeneratedMessage
@ -3171,9 +3181,31 @@ public final class AdminProtos {
return versionOfOfflineNode_;
}
// repeated .ServerName favoredNodes = 3;
public static final int FAVOREDNODES_FIELD_NUMBER = 3;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> favoredNodes_;
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> getFavoredNodesList() {
return favoredNodes_;
}
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
getFavoredNodesOrBuilderList() {
return favoredNodes_;
}
public int getFavoredNodesCount() {
return favoredNodes_.size();
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getFavoredNodes(int index) {
return favoredNodes_.get(index);
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getFavoredNodesOrBuilder(
int index) {
return favoredNodes_.get(index);
}
private void initFields() {
region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
versionOfOfflineNode_ = 0;
favoredNodes_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@ -3188,6 +3220,12 @@ public final class AdminProtos {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getFavoredNodesCount(); i++) {
if (!getFavoredNodes(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
@ -3201,6 +3239,9 @@ public final class AdminProtos {
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt32(2, versionOfOfflineNode_);
}
for (int i = 0; i < favoredNodes_.size(); i++) {
output.writeMessage(3, favoredNodes_.get(i));
}
getUnknownFields().writeTo(output);
}
@ -3218,6 +3259,10 @@ public final class AdminProtos {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(2, versionOfOfflineNode_);
}
for (int i = 0; i < favoredNodes_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, favoredNodes_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@ -3251,6 +3296,8 @@ public final class AdminProtos {
result = result && (getVersionOfOfflineNode()
== other.getVersionOfOfflineNode());
}
result = result && getFavoredNodesList()
.equals(other.getFavoredNodesList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@ -3268,6 +3315,10 @@ public final class AdminProtos {
hash = (37 * hash) + VERSIONOFOFFLINENODE_FIELD_NUMBER;
hash = (53 * hash) + getVersionOfOfflineNode();
}
if (getFavoredNodesCount() > 0) {
hash = (37 * hash) + FAVOREDNODES_FIELD_NUMBER;
hash = (53 * hash) + getFavoredNodesList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
@ -3377,6 +3428,7 @@ public final class AdminProtos {
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getRegionFieldBuilder();
getFavoredNodesFieldBuilder();
}
}
private static Builder create() {
@ -3393,6 +3445,12 @@ public final class AdminProtos {
bitField0_ = (bitField0_ & ~0x00000001);
versionOfOfflineNode_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
if (favoredNodesBuilder_ == null) {
favoredNodes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
} else {
favoredNodesBuilder_.clear();
}
return this;
}
@ -3443,6 +3501,15 @@ public final class AdminProtos {
to_bitField0_ |= 0x00000002;
}
result.versionOfOfflineNode_ = versionOfOfflineNode_;
if (favoredNodesBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004)) {
favoredNodes_ = java.util.Collections.unmodifiableList(favoredNodes_);
bitField0_ = (bitField0_ & ~0x00000004);
}
result.favoredNodes_ = favoredNodes_;
} else {
result.favoredNodes_ = favoredNodesBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@ -3465,6 +3532,32 @@ public final class AdminProtos {
if (other.hasVersionOfOfflineNode()) {
setVersionOfOfflineNode(other.getVersionOfOfflineNode());
}
if (favoredNodesBuilder_ == null) {
if (!other.favoredNodes_.isEmpty()) {
if (favoredNodes_.isEmpty()) {
favoredNodes_ = other.favoredNodes_;
bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureFavoredNodesIsMutable();
favoredNodes_.addAll(other.favoredNodes_);
}
onChanged();
}
} else {
if (!other.favoredNodes_.isEmpty()) {
if (favoredNodesBuilder_.isEmpty()) {
favoredNodesBuilder_.dispose();
favoredNodesBuilder_ = null;
favoredNodes_ = other.favoredNodes_;
bitField0_ = (bitField0_ & ~0x00000004);
favoredNodesBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getFavoredNodesFieldBuilder() : null;
} else {
favoredNodesBuilder_.addAllMessages(other.favoredNodes_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@ -3478,6 +3571,12 @@ public final class AdminProtos {
return false;
}
for (int i = 0; i < getFavoredNodesCount(); i++) {
if (!getFavoredNodes(i).isInitialized()) {
return false;
}
}
return true;
}
@ -3518,6 +3617,12 @@ public final class AdminProtos {
versionOfOfflineNode_ = input.readUInt32();
break;
}
case 26: {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder();
input.readMessage(subBuilder, extensionRegistry);
addFavoredNodes(subBuilder.buildPartial());
break;
}
}
}
}
@ -3635,6 +3740,192 @@ public final class AdminProtos {
return this;
}
// repeated .ServerName favoredNodes = 3;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> favoredNodes_ =
java.util.Collections.emptyList();
private void ensureFavoredNodesIsMutable() {
if (!((bitField0_ & 0x00000004) == 0x00000004)) {
favoredNodes_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>(favoredNodes_);
bitField0_ |= 0x00000004;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> favoredNodesBuilder_;
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> getFavoredNodesList() {
if (favoredNodesBuilder_ == null) {
return java.util.Collections.unmodifiableList(favoredNodes_);
} else {
return favoredNodesBuilder_.getMessageList();
}
}
public int getFavoredNodesCount() {
if (favoredNodesBuilder_ == null) {
return favoredNodes_.size();
} else {
return favoredNodesBuilder_.getCount();
}
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getFavoredNodes(int index) {
if (favoredNodesBuilder_ == null) {
return favoredNodes_.get(index);
} else {
return favoredNodesBuilder_.getMessage(index);
}
}
public Builder setFavoredNodes(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (favoredNodesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureFavoredNodesIsMutable();
favoredNodes_.set(index, value);
onChanged();
} else {
favoredNodesBuilder_.setMessage(index, value);
}
return this;
}
public Builder setFavoredNodes(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
if (favoredNodesBuilder_ == null) {
ensureFavoredNodesIsMutable();
favoredNodes_.set(index, builderForValue.build());
onChanged();
} else {
favoredNodesBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
public Builder addFavoredNodes(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (favoredNodesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureFavoredNodesIsMutable();
favoredNodes_.add(value);
onChanged();
} else {
favoredNodesBuilder_.addMessage(value);
}
return this;
}
public Builder addFavoredNodes(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (favoredNodesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureFavoredNodesIsMutable();
favoredNodes_.add(index, value);
onChanged();
} else {
favoredNodesBuilder_.addMessage(index, value);
}
return this;
}
public Builder addFavoredNodes(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
if (favoredNodesBuilder_ == null) {
ensureFavoredNodesIsMutable();
favoredNodes_.add(builderForValue.build());
onChanged();
} else {
favoredNodesBuilder_.addMessage(builderForValue.build());
}
return this;
}
public Builder addFavoredNodes(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
if (favoredNodesBuilder_ == null) {
ensureFavoredNodesIsMutable();
favoredNodes_.add(index, builderForValue.build());
onChanged();
} else {
favoredNodesBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
public Builder addAllFavoredNodes(
java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> values) {
if (favoredNodesBuilder_ == null) {
ensureFavoredNodesIsMutable();
super.addAll(values, favoredNodes_);
onChanged();
} else {
favoredNodesBuilder_.addAllMessages(values);
}
return this;
}
public Builder clearFavoredNodes() {
if (favoredNodesBuilder_ == null) {
favoredNodes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
} else {
favoredNodesBuilder_.clear();
}
return this;
}
public Builder removeFavoredNodes(int index) {
if (favoredNodesBuilder_ == null) {
ensureFavoredNodesIsMutable();
favoredNodes_.remove(index);
onChanged();
} else {
favoredNodesBuilder_.remove(index);
}
return this;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getFavoredNodesBuilder(
int index) {
return getFavoredNodesFieldBuilder().getBuilder(index);
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getFavoredNodesOrBuilder(
int index) {
if (favoredNodesBuilder_ == null) {
return favoredNodes_.get(index); } else {
return favoredNodesBuilder_.getMessageOrBuilder(index);
}
}
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
getFavoredNodesOrBuilderList() {
if (favoredNodesBuilder_ != null) {
return favoredNodesBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(favoredNodes_);
}
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addFavoredNodesBuilder() {
return getFavoredNodesFieldBuilder().addBuilder(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addFavoredNodesBuilder(
int index) {
return getFavoredNodesFieldBuilder().addBuilder(
index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
}
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder>
getFavoredNodesBuilderList() {
return getFavoredNodesFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
getFavoredNodesFieldBuilder() {
if (favoredNodesBuilder_ == null) {
favoredNodesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
favoredNodes_,
((bitField0_ & 0x00000004) == 0x00000004),
getParentForChildren(),
isClean());
favoredNodes_ = null;
}
return favoredNodesBuilder_;
}
// @@protoc_insertion_point(builder_scope:OpenRegionRequest.RegionOpenInfo)
}
@ -14976,65 +15267,66 @@ public final class AdminProtos {
"gionSpecifier\022\016\n\006family\030\002 \003(\014\")\n\024GetStor",
"eFileResponse\022\021\n\tstoreFile\030\001 \003(\t\"\030\n\026GetO" +
"nlineRegionRequest\":\n\027GetOnlineRegionRes" +
"ponse\022\037\n\nregionInfo\030\001 \003(\0132\013.RegionInfo\"\225" +
"ponse\022\037\n\nregionInfo\030\001 \003(\0132\013.RegionInfo\"\270" +
"\001\n\021OpenRegionRequest\0223\n\010openInfo\030\001 \003(\0132!" +
".OpenRegionRequest.RegionOpenInfo\032K\n\016Reg" +
".OpenRegionRequest.RegionOpenInfo\032n\n\016Reg" +
"ionOpenInfo\022\033\n\006region\030\001 \002(\0132\013.RegionInfo" +
"\022\034\n\024versionOfOfflineNode\030\002 \001(\r\"\234\001\n\022OpenR" +
"egionResponse\022<\n\014openingState\030\001 \003(\0162&.Op" +
"enRegionResponse.RegionOpeningState\"H\n\022R" +
"egionOpeningState\022\n\n\006OPENED\020\000\022\022\n\016ALREADY",
"_OPENED\020\001\022\022\n\016FAILED_OPENING\020\002\"\232\001\n\022CloseR" +
"egionRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpe" +
"cifier\022\034\n\024versionOfClosingNode\030\002 \001(\r\022\034\n\016" +
"transitionInZK\030\003 \001(\010:\004true\022&\n\021destinatio" +
"nServer\030\004 \001(\0132\013.ServerName\"%\n\023CloseRegio" +
"nResponse\022\016\n\006closed\030\001 \002(\010\"M\n\022FlushRegion" +
"\022\034\n\024versionOfOfflineNode\030\002 \001(\r\022!\n\014favore" +
"dNodes\030\003 \003(\0132\013.ServerName\"\234\001\n\022OpenRegion" +
"Response\022<\n\014openingState\030\001 \003(\0162&.OpenReg" +
"ionResponse.RegionOpeningState\"H\n\022Region",
"OpeningState\022\n\n\006OPENED\020\000\022\022\n\016ALREADY_OPEN" +
"ED\020\001\022\022\n\016FAILED_OPENING\020\002\"\232\001\n\022CloseRegion" +
"Request\022 \n\006region\030\001 \002(\0132\020.RegionSpecifie" +
"r\022\025\n\rifOlderThanTs\030\002 \001(\004\"=\n\023FlushRegionR" +
"esponse\022\025\n\rlastFlushTime\030\001 \002(\004\022\017\n\007flushe" +
"d\030\002 \001(\010\"J\n\022SplitRegionRequest\022 \n\006region\030",
"\001 \002(\0132\020.RegionSpecifier\022\022\n\nsplitPoint\030\002 " +
"\001(\014\"\025\n\023SplitRegionResponse\"W\n\024CompactReg" +
"ionRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpeci" +
"fier\022\r\n\005major\030\002 \001(\010\022\016\n\006family\030\003 \001(\014\"\027\n\025C" +
"ompactRegionResponse\"t\n\023MergeRegionsRequ" +
"est\022!\n\007regionA\030\001 \002(\0132\020.RegionSpecifier\022!" +
"\n\007regionB\030\002 \002(\0132\020.RegionSpecifier\022\027\n\010for" +
"cible\030\003 \001(\010:\005false\"\026\n\024MergeRegionsRespon" +
"se\"7\n\010WALEntry\022\024\n\003key\030\001 \002(\0132\007.WALKey\022\025\n\r" +
"keyValueBytes\030\002 \003(\014\"4\n\030ReplicateWALEntry",
"Request\022\030\n\005entry\030\001 \003(\0132\t.WALEntry\"\033\n\031Rep" +
"licateWALEntryResponse\"\026\n\024RollWALWriterR" +
"equest\".\n\025RollWALWriterResponse\022\025\n\rregio" +
"nToFlush\030\001 \003(\014\"#\n\021StopServerRequest\022\016\n\006r" +
"eason\030\001 \002(\t\"\024\n\022StopServerResponse\"\026\n\024Get" +
"ServerInfoRequest\"@\n\nServerInfo\022\037\n\nserve" +
"rName\030\001 \002(\0132\013.ServerName\022\021\n\twebuiPort\030\002 " +
"\001(\r\"8\n\025GetServerInfoResponse\022\037\n\nserverIn" +
"fo\030\001 \002(\0132\013.ServerInfo2\266\006\n\014AdminService\022>" +
"\n\rgetRegionInfo\022\025.GetRegionInfoRequest\032\026",
".GetRegionInfoResponse\022;\n\014getStoreFile\022\024" +
".GetStoreFileRequest\032\025.GetStoreFileRespo" +
"nse\022D\n\017getOnlineRegion\022\027.GetOnlineRegion" +
"Request\032\030.GetOnlineRegionResponse\0225\n\nope" +
"nRegion\022\022.OpenRegionRequest\032\023.OpenRegion" +
"Response\0228\n\013closeRegion\022\023.CloseRegionReq" +
"uest\032\024.CloseRegionResponse\0228\n\013flushRegio" +
"n\022\023.FlushRegionRequest\032\024.FlushRegionResp" +
"onse\0228\n\013splitRegion\022\023.SplitRegionRequest" +
"\032\024.SplitRegionResponse\022>\n\rcompactRegion\022",
"\025.CompactRegionRequest\032\026.CompactRegionRe" +
"sponse\022;\n\014mergeRegions\022\024.MergeRegionsReq" +
"uest\032\025.MergeRegionsResponse\022J\n\021replicate" +
"WALEntry\022\031.ReplicateWALEntryRequest\032\032.Re" +
"plicateWALEntryResponse\022>\n\rrollWALWriter" +
"\022\025.RollWALWriterRequest\032\026.RollWALWriterR" +
"esponse\022>\n\rgetServerInfo\022\025.GetServerInfo" +
"Request\032\026.GetServerInfoResponse\0225\n\nstopS" +
"erver\022\022.StopServerRequest\032\023.StopServerRe" +
"sponseBA\n*org.apache.hadoop.hbase.protob",
"uf.generatedB\013AdminProtosH\001\210\001\001\240\001\001"
"r\022\034\n\024versionOfClosingNode\030\002 \001(\r\022\034\n\016trans" +
"itionInZK\030\003 \001(\010:\004true\022&\n\021destinationServ" +
"er\030\004 \001(\0132\013.ServerName\"%\n\023CloseRegionResp" +
"onse\022\016\n\006closed\030\001 \002(\010\"M\n\022FlushRegionReque" +
"st\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\025\n\r" +
"ifOlderThanTs\030\002 \001(\004\"=\n\023FlushRegionRespon" +
"se\022\025\n\rlastFlushTime\030\001 \002(\004\022\017\n\007flushed\030\002 \001",
"(\010\"J\n\022SplitRegionRequest\022 \n\006region\030\001 \002(\013" +
"2\020.RegionSpecifier\022\022\n\nsplitPoint\030\002 \001(\014\"\025" +
"\n\023SplitRegionResponse\"W\n\024CompactRegionRe" +
"quest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022" +
"\r\n\005major\030\002 \001(\010\022\016\n\006family\030\003 \001(\014\"\027\n\025Compac" +
"tRegionResponse\"t\n\023MergeRegionsRequest\022!" +
"\n\007regionA\030\001 \002(\0132\020.RegionSpecifier\022!\n\007reg" +
"ionB\030\002 \002(\0132\020.RegionSpecifier\022\027\n\010forcible" +
"\030\003 \001(\010:\005false\"\026\n\024MergeRegionsResponse\"7\n" +
"\010WALEntry\022\024\n\003key\030\001 \002(\0132\007.WALKey\022\025\n\rkeyVa",
"lueBytes\030\002 \003(\014\"4\n\030ReplicateWALEntryReque" +
"st\022\030\n\005entry\030\001 \003(\0132\t.WALEntry\"\033\n\031Replicat" +
"eWALEntryResponse\"\026\n\024RollWALWriterReques" +
"t\".\n\025RollWALWriterResponse\022\025\n\rregionToFl" +
"ush\030\001 \003(\014\"#\n\021StopServerRequest\022\016\n\006reason" +
"\030\001 \002(\t\"\024\n\022StopServerResponse\"\026\n\024GetServe" +
"rInfoRequest\"@\n\nServerInfo\022\037\n\nserverName" +
"\030\001 \002(\0132\013.ServerName\022\021\n\twebuiPort\030\002 \001(\r\"8" +
"\n\025GetServerInfoResponse\022\037\n\nserverInfo\030\001 " +
"\002(\0132\013.ServerInfo2\266\006\n\014AdminService\022>\n\rget",
"RegionInfo\022\025.GetRegionInfoRequest\032\026.GetR" +
"egionInfoResponse\022;\n\014getStoreFile\022\024.GetS" +
"toreFileRequest\032\025.GetStoreFileResponse\022D" +
"\n\017getOnlineRegion\022\027.GetOnlineRegionReque" +
"st\032\030.GetOnlineRegionResponse\0225\n\nopenRegi" +
"on\022\022.OpenRegionRequest\032\023.OpenRegionRespo" +
"nse\0228\n\013closeRegion\022\023.CloseRegionRequest\032" +
"\024.CloseRegionResponse\0228\n\013flushRegion\022\023.F" +
"lushRegionRequest\032\024.FlushRegionResponse\022" +
"8\n\013splitRegion\022\023.SplitRegionRequest\032\024.Sp",
"litRegionResponse\022>\n\rcompactRegion\022\025.Com" +
"pactRegionRequest\032\026.CompactRegionRespons" +
"e\022;\n\014mergeRegions\022\024.MergeRegionsRequest\032" +
"\025.MergeRegionsResponse\022J\n\021replicateWALEn" +
"try\022\031.ReplicateWALEntryRequest\032\032.Replica" +
"teWALEntryResponse\022>\n\rrollWALWriter\022\025.Ro" +
"llWALWriterRequest\032\026.RollWALWriterRespon" +
"se\022>\n\rgetServerInfo\022\025.GetServerInfoReque" +
"st\032\026.GetServerInfoResponse\0225\n\nstopServer" +
"\022\022.StopServerRequest\032\023.StopServerRespons",
"eBA\n*org.apache.hadoop.hbase.protobuf.ge" +
"neratedB\013AdminProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@ -15102,7 +15394,7 @@ public final class AdminProtos {
internal_static_OpenRegionRequest_RegionOpenInfo_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_OpenRegionRequest_RegionOpenInfo_descriptor,
new java.lang.String[] { "Region", "VersionOfOfflineNode", },
new java.lang.String[] { "Region", "VersionOfOfflineNode", "FavoredNodes", },
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.class,
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder.class);
internal_static_OpenRegionResponse_descriptor =
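
Because favoredNodes is a repeated (and therefore optional) field, OpenRegionRequests built by older callers that never set it still parse; readers simply see an empty list. As a sketch of the receiving side (not code from this commit), a handler could walk each RegionOpenInfo with only the generated accessors shown above; the types are the generated AdminProtos and HBaseProtos classes.

// Sketch only: reading the placement hint back out of a parsed OpenRegionRequest.
static void dumpFavoredNodes(OpenRegionRequest request) {
  for (OpenRegionRequest.RegionOpenInfo openInfo : request.getOpenInfoList()) {
    if (openInfo.getFavoredNodesCount() == 0) {
      continue;  // no placement hint was sent for this region
    }
    for (HBaseProtos.ServerName favored : openInfo.getFavoredNodesList()) {
      // hostName, port and startCode are the generated accessors of the ServerName message
      System.out.println(favored.getHostName() + ":" + favored.getPort()
          + "/" + favored.getStartCode());
    }
  }
}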

View File

@ -3832,6 +3832,599 @@ public final class HBaseProtos {
// @@protoc_insertion_point(class_scope:RegionInfo)
}
public interface FavoredNodesOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated .ServerName favoredNode = 1;
java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>
getFavoredNodeList();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getFavoredNode(int index);
int getFavoredNodeCount();
java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
getFavoredNodeOrBuilderList();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getFavoredNodeOrBuilder(
int index);
}
public static final class FavoredNodes extends
com.google.protobuf.GeneratedMessage
implements FavoredNodesOrBuilder {
// Use FavoredNodes.newBuilder() to construct.
private FavoredNodes(Builder builder) {
super(builder);
}
private FavoredNodes(boolean noInit) {}
private static final FavoredNodes defaultInstance;
public static FavoredNodes getDefaultInstance() {
return defaultInstance;
}
public FavoredNodes getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_FavoredNodes_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_FavoredNodes_fieldAccessorTable;
}
// repeated .ServerName favoredNode = 1;
public static final int FAVOREDNODE_FIELD_NUMBER = 1;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> favoredNode_;
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> getFavoredNodeList() {
return favoredNode_;
}
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
getFavoredNodeOrBuilderList() {
return favoredNode_;
}
public int getFavoredNodeCount() {
return favoredNode_.size();
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getFavoredNode(int index) {
return favoredNode_.get(index);
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getFavoredNodeOrBuilder(
int index) {
return favoredNode_.get(index);
}
private void initFields() {
favoredNode_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
for (int i = 0; i < getFavoredNodeCount(); i++) {
if (!getFavoredNode(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < favoredNode_.size(); i++) {
output.writeMessage(1, favoredNode_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
for (int i = 0; i < favoredNode_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, favoredNode_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes) obj;
boolean result = true;
result = result && getFavoredNodeList()
.equals(other.getFavoredNodeList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getFavoredNodeCount() > 0) {
hash = (37 * hash) + FAVOREDNODE_FIELD_NUMBER;
hash = (53 * hash) + getFavoredNodeList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodesOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_FavoredNodes_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_FavoredNodes_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getFavoredNodeFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (favoredNodeBuilder_ == null) {
favoredNode_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
} else {
favoredNodeBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.getDescriptor();
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes build() {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes(this);
int from_bitField0_ = bitField0_;
if (favoredNodeBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
favoredNode_ = java.util.Collections.unmodifiableList(favoredNode_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.favoredNode_ = favoredNode_;
} else {
result.favoredNode_ = favoredNodeBuilder_.build();
}
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.getDefaultInstance()) return this;
if (favoredNodeBuilder_ == null) {
if (!other.favoredNode_.isEmpty()) {
if (favoredNode_.isEmpty()) {
favoredNode_ = other.favoredNode_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureFavoredNodeIsMutable();
favoredNode_.addAll(other.favoredNode_);
}
onChanged();
}
} else {
if (!other.favoredNode_.isEmpty()) {
if (favoredNodeBuilder_.isEmpty()) {
favoredNodeBuilder_.dispose();
favoredNodeBuilder_ = null;
favoredNode_ = other.favoredNode_;
bitField0_ = (bitField0_ & ~0x00000001);
favoredNodeBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getFavoredNodeFieldBuilder() : null;
} else {
favoredNodeBuilder_.addAllMessages(other.favoredNode_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
for (int i = 0; i < getFavoredNodeCount(); i++) {
if (!getFavoredNode(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 10: {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder();
input.readMessage(subBuilder, extensionRegistry);
addFavoredNode(subBuilder.buildPartial());
break;
}
}
}
}
private int bitField0_;
// repeated .ServerName favoredNode = 1;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> favoredNode_ =
java.util.Collections.emptyList();
private void ensureFavoredNodeIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
favoredNode_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>(favoredNode_);
bitField0_ |= 0x00000001;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> favoredNodeBuilder_;
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> getFavoredNodeList() {
if (favoredNodeBuilder_ == null) {
return java.util.Collections.unmodifiableList(favoredNode_);
} else {
return favoredNodeBuilder_.getMessageList();
}
}
public int getFavoredNodeCount() {
if (favoredNodeBuilder_ == null) {
return favoredNode_.size();
} else {
return favoredNodeBuilder_.getCount();
}
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getFavoredNode(int index) {
if (favoredNodeBuilder_ == null) {
return favoredNode_.get(index);
} else {
return favoredNodeBuilder_.getMessage(index);
}
}
public Builder setFavoredNode(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (favoredNodeBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureFavoredNodeIsMutable();
favoredNode_.set(index, value);
onChanged();
} else {
favoredNodeBuilder_.setMessage(index, value);
}
return this;
}
public Builder setFavoredNode(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
if (favoredNodeBuilder_ == null) {
ensureFavoredNodeIsMutable();
favoredNode_.set(index, builderForValue.build());
onChanged();
} else {
favoredNodeBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
public Builder addFavoredNode(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (favoredNodeBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureFavoredNodeIsMutable();
favoredNode_.add(value);
onChanged();
} else {
favoredNodeBuilder_.addMessage(value);
}
return this;
}
public Builder addFavoredNode(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (favoredNodeBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureFavoredNodeIsMutable();
favoredNode_.add(index, value);
onChanged();
} else {
favoredNodeBuilder_.addMessage(index, value);
}
return this;
}
public Builder addFavoredNode(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
if (favoredNodeBuilder_ == null) {
ensureFavoredNodeIsMutable();
favoredNode_.add(builderForValue.build());
onChanged();
} else {
favoredNodeBuilder_.addMessage(builderForValue.build());
}
return this;
}
public Builder addFavoredNode(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
if (favoredNodeBuilder_ == null) {
ensureFavoredNodeIsMutable();
favoredNode_.add(index, builderForValue.build());
onChanged();
} else {
favoredNodeBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
public Builder addAllFavoredNode(
java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> values) {
if (favoredNodeBuilder_ == null) {
ensureFavoredNodeIsMutable();
super.addAll(values, favoredNode_);
onChanged();
} else {
favoredNodeBuilder_.addAllMessages(values);
}
return this;
}
public Builder clearFavoredNode() {
if (favoredNodeBuilder_ == null) {
favoredNode_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
} else {
favoredNodeBuilder_.clear();
}
return this;
}
public Builder removeFavoredNode(int index) {
if (favoredNodeBuilder_ == null) {
ensureFavoredNodeIsMutable();
favoredNode_.remove(index);
onChanged();
} else {
favoredNodeBuilder_.remove(index);
}
return this;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getFavoredNodeBuilder(
int index) {
return getFavoredNodeFieldBuilder().getBuilder(index);
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getFavoredNodeOrBuilder(
int index) {
if (favoredNodeBuilder_ == null) {
return favoredNode_.get(index); } else {
return favoredNodeBuilder_.getMessageOrBuilder(index);
}
}
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
getFavoredNodeOrBuilderList() {
if (favoredNodeBuilder_ != null) {
return favoredNodeBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(favoredNode_);
}
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addFavoredNodeBuilder() {
return getFavoredNodeFieldBuilder().addBuilder(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addFavoredNodeBuilder(
int index) {
return getFavoredNodeFieldBuilder().addBuilder(
index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
}
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder>
getFavoredNodeBuilderList() {
return getFavoredNodeFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
getFavoredNodeFieldBuilder() {
if (favoredNodeBuilder_ == null) {
favoredNodeBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
favoredNode_,
((bitField0_ & 0x00000001) == 0x00000001),
getParentForChildren(),
isClean());
favoredNode_ = null;
}
return favoredNodeBuilder_;
}
// @@protoc_insertion_point(builder_scope:FavoredNodes)
}
static {
defaultInstance = new FavoredNodes(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:FavoredNodes)
}
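// The new FavoredNodes wrapper carries nothing but a repeated ServerName list, which
// makes it convenient to persist a placement hint as a single byte blob. Below is a
// round-trip sketch through the generated builder and parser (not code from this
// commit; host data is made up, and where such a blob would be stored is out of
// scope here).
static HBaseProtos.FavoredNodes favoredNodesRoundTripSketch()
    throws com.google.protobuf.InvalidProtocolBufferException {
  HBaseProtos.FavoredNodes.Builder builder = HBaseProtos.FavoredNodes.newBuilder();
  for (String host : new String[] {"dn1.example.com", "dn2.example.com", "dn3.example.com"}) {
    builder.addFavoredNode(HBaseProtos.ServerName.newBuilder()
        .setHostName(host)   // hostName is the only required field of ServerName
        .setPort(60020)
        .build());
  }
  byte[] asBytes = builder.build().toByteArray();      // serialize the placement hint
  return HBaseProtos.FavoredNodes.parseFrom(asBytes);  // and parse it right back
}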
public interface RegionSpecifierOrBuilder
extends com.google.protobuf.MessageOrBuilder {
@ -14018,6 +14611,11 @@ public final class HBaseProtos {
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_RegionInfo_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_FavoredNodes_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_FavoredNodes_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_RegionSpecifier_descriptor;
private static
@ -14124,55 +14722,57 @@ public final class HBaseProtos {
"\rconfiguration\030\003 \003(\0132\017.NameStringPair\"s\n",
"\nRegionInfo\022\020\n\010regionId\030\001 \002(\004\022\021\n\ttableNa" +
"me\030\002 \002(\014\022\020\n\010startKey\030\003 \001(\014\022\016\n\006endKey\030\004 \001" +
"(\014\022\017\n\007offline\030\005 \001(\010\022\r\n\005split\030\006 \001(\010\"\225\001\n\017R" +
"egionSpecifier\0222\n\004type\030\001 \002(\0162$.RegionSpe" +
"cifier.RegionSpecifierType\022\r\n\005value\030\002 \002(" +
"\014\"?\n\023RegionSpecifierType\022\017\n\013REGION_NAME\020" +
"\001\022\027\n\023ENCODED_REGION_NAME\020\002\"\260\003\n\nRegionLoa" +
"d\022)\n\017regionSpecifier\030\001 \002(\0132\020.RegionSpeci" +
"fier\022\016\n\006stores\030\002 \001(\r\022\022\n\nstorefiles\030\003 \001(\r" +
"\022\037\n\027storeUncompressedSizeMB\030\004 \001(\r\022\027\n\017sto",
"refileSizeMB\030\005 \001(\r\022\026\n\016memstoreSizeMB\030\006 \001" +
"(\r\022\034\n\024storefileIndexSizeMB\030\007 \001(\r\022\031\n\021read" +
"RequestsCount\030\010 \001(\004\022\032\n\022writeRequestsCoun" +
"t\030\t \001(\004\022\032\n\022totalCompactingKVs\030\n \001(\004\022\033\n\023c" +
"urrentCompactedKVs\030\013 \001(\004\022\027\n\017rootIndexSiz" +
"eKB\030\014 \001(\r\022\036\n\026totalStaticIndexSizeKB\030\r \001(" +
"\r\022\036\n\026totalStaticBloomSizeKB\030\016 \001(\r\022\032\n\022com" +
"pleteSequenceId\030\017 \001(\004\"\372\001\n\nServerLoad\022\030\n\020" +
"numberOfRequests\030\001 \001(\r\022\035\n\025totalNumberOfR" +
"equests\030\002 \001(\r\022\022\n\nusedHeapMB\030\003 \001(\r\022\021\n\tmax",
"HeapMB\030\004 \001(\r\022 \n\013regionLoads\030\005 \003(\0132\013.Regi" +
"onLoad\022\"\n\014coprocessors\030\006 \003(\0132\014.Coprocess" +
"or\022\027\n\017reportStartTime\030\007 \001(\004\022\025\n\rreportEnd" +
"Time\030\010 \001(\004\022\026\n\016infoServerPort\030\t \001(\r\"%\n\tTi" +
"meRange\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001(\004\"0\n\006Fil" +
"ter\022\014\n\004name\030\001 \002(\t\022\030\n\020serializedFilter\030\002 " +
"\001(\014\"x\n\010KeyValue\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002" +
" \002(\014\022\021\n\tqualifier\030\003 \002(\014\022\021\n\ttimestamp\030\004 \001" +
"(\004\022\032\n\007keyType\030\005 \001(\0162\t.CellType\022\r\n\005value\030" +
"\006 \001(\014\"?\n\nServerName\022\020\n\010hostName\030\001 \002(\t\022\014\n",
"\004port\030\002 \001(\r\022\021\n\tstartCode\030\003 \001(\004\"\033\n\013Coproc" +
"essor\022\014\n\004name\030\001 \002(\t\"-\n\016NameStringPair\022\014\n" +
"\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNameBytesP" +
"air\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014\"/\n\016Byte" +
"sBytesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006second\030\002 \002(" +
"\014\",\n\rNameInt64Pair\022\014\n\004name\030\001 \001(\t\022\r\n\005valu" +
"e\030\002 \001(\003\"\255\001\n\023SnapshotDescription\022\014\n\004name\030" +
"\001 \002(\t\022\r\n\005table\030\002 \001(\t\022\027\n\014creationTime\030\003 \001" +
"(\003:\0010\022.\n\004type\030\004 \001(\0162\031.SnapshotDescriptio" +
"n.Type:\005FLUSH\022\017\n\007version\030\005 \001(\005\"\037\n\004Type\022\014",
"\n\010DISABLED\020\000\022\t\n\005FLUSH\020\001\"\n\n\010EmptyMsg\"\032\n\007L" +
"ongMsg\022\017\n\007longMsg\030\001 \002(\003\"&\n\rBigDecimalMsg" +
"\022\025\n\rbigdecimalMsg\030\001 \002(\014\"1\n\004UUID\022\024\n\014least" +
"SigBits\030\001 \002(\004\022\023\n\013mostSigBits\030\002 \002(\004*`\n\010Ce" +
"llType\022\013\n\007MINIMUM\020\000\022\007\n\003PUT\020\004\022\n\n\006DELETE\020\010" +
"\022\021\n\rDELETE_COLUMN\020\014\022\021\n\rDELETE_FAMILY\020\016\022\014" +
"\n\007MAXIMUM\020\377\001*r\n\013CompareType\022\010\n\004LESS\020\000\022\021\n" +
"\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL" +
"\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n" +
"\005NO_OP\020\006B>\n*org.apache.hadoop.hbase.prot",
"obuf.generatedB\013HBaseProtosH\001\240\001\001"
"(\014\022\017\n\007offline\030\005 \001(\010\022\r\n\005split\030\006 \001(\010\"0\n\014Fa" +
"voredNodes\022 \n\013favoredNode\030\001 \003(\0132\013.Server" +
"Name\"\225\001\n\017RegionSpecifier\0222\n\004type\030\001 \002(\0162$" +
".RegionSpecifier.RegionSpecifierType\022\r\n\005" +
"value\030\002 \002(\014\"?\n\023RegionSpecifierType\022\017\n\013RE" +
"GION_NAME\020\001\022\027\n\023ENCODED_REGION_NAME\020\002\"\260\003\n" +
"\nRegionLoad\022)\n\017regionSpecifier\030\001 \002(\0132\020.R" +
"egionSpecifier\022\016\n\006stores\030\002 \001(\r\022\022\n\nstoref",
"iles\030\003 \001(\r\022\037\n\027storeUncompressedSizeMB\030\004 " +
"\001(\r\022\027\n\017storefileSizeMB\030\005 \001(\r\022\026\n\016memstore" +
"SizeMB\030\006 \001(\r\022\034\n\024storefileIndexSizeMB\030\007 \001" +
"(\r\022\031\n\021readRequestsCount\030\010 \001(\004\022\032\n\022writeRe" +
"questsCount\030\t \001(\004\022\032\n\022totalCompactingKVs\030" +
"\n \001(\004\022\033\n\023currentCompactedKVs\030\013 \001(\004\022\027\n\017ro" +
"otIndexSizeKB\030\014 \001(\r\022\036\n\026totalStaticIndexS" +
"izeKB\030\r \001(\r\022\036\n\026totalStaticBloomSizeKB\030\016 " +
"\001(\r\022\032\n\022completeSequenceId\030\017 \001(\004\"\372\001\n\nServ" +
"erLoad\022\030\n\020numberOfRequests\030\001 \001(\r\022\035\n\025tota",
"lNumberOfRequests\030\002 \001(\r\022\022\n\nusedHeapMB\030\003 " +
"\001(\r\022\021\n\tmaxHeapMB\030\004 \001(\r\022 \n\013regionLoads\030\005 " +
"\003(\0132\013.RegionLoad\022\"\n\014coprocessors\030\006 \003(\0132\014" +
".Coprocessor\022\027\n\017reportStartTime\030\007 \001(\004\022\025\n" +
"\rreportEndTime\030\010 \001(\004\022\026\n\016infoServerPort\030\t" +
" \001(\r\"%\n\tTimeRange\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 " +
"\001(\004\"0\n\006Filter\022\014\n\004name\030\001 \002(\t\022\030\n\020serialize" +
"dFilter\030\002 \001(\014\"x\n\010KeyValue\022\013\n\003row\030\001 \002(\014\022\016" +
"\n\006family\030\002 \002(\014\022\021\n\tqualifier\030\003 \002(\014\022\021\n\ttim" +
"estamp\030\004 \001(\004\022\032\n\007keyType\030\005 \001(\0162\t.CellType",
"\022\r\n\005value\030\006 \001(\014\"?\n\nServerName\022\020\n\010hostNam" +
"e\030\001 \002(\t\022\014\n\004port\030\002 \001(\r\022\021\n\tstartCode\030\003 \001(\004" +
"\"\033\n\013Coprocessor\022\014\n\004name\030\001 \002(\t\"-\n\016NameStr" +
"ingPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\r" +
"NameBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001" +
"(\014\"/\n\016BytesBytesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006s" +
"econd\030\002 \002(\014\",\n\rNameInt64Pair\022\014\n\004name\030\001 \001" +
"(\t\022\r\n\005value\030\002 \001(\003\"\255\001\n\023SnapshotDescriptio" +
"n\022\014\n\004name\030\001 \002(\t\022\r\n\005table\030\002 \001(\t\022\027\n\014creati" +
"onTime\030\003 \001(\003:\0010\022.\n\004type\030\004 \001(\0162\031.Snapshot",
"Description.Type:\005FLUSH\022\017\n\007version\030\005 \001(\005" +
"\"\037\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH\020\001\"\n\n\010Emp" +
"tyMsg\"\032\n\007LongMsg\022\017\n\007longMsg\030\001 \002(\003\"&\n\rBig" +
"DecimalMsg\022\025\n\rbigdecimalMsg\030\001 \002(\014\"1\n\004UUI" +
"D\022\024\n\014leastSigBits\030\001 \002(\004\022\023\n\013mostSigBits\030\002" +
" \002(\004*`\n\010CellType\022\013\n\007MINIMUM\020\000\022\007\n\003PUT\020\004\022\n" +
"\n\006DELETE\020\010\022\021\n\rDELETE_COLUMN\020\014\022\021\n\rDELETE_" +
"FAMILY\020\016\022\014\n\007MAXIMUM\020\377\001*r\n\013CompareType\022\010\n" +
"\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n" +
"\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GR",
"EATER\020\005\022\t\n\005NO_OP\020\006B>\n*org.apache.hadoop." +
"hbase.protobuf.generatedB\013HBaseProtosH\001\240" +
"\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@ -14211,8 +14811,16 @@ public final class HBaseProtos {
new java.lang.String[] { "RegionId", "TableName", "StartKey", "EndKey", "Offline", "Split", },
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder.class);
internal_static_RegionSpecifier_descriptor =
internal_static_FavoredNodes_descriptor =
getDescriptor().getMessageTypes().get(4);
internal_static_FavoredNodes_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_FavoredNodes_descriptor,
new java.lang.String[] { "FavoredNode", },
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.Builder.class);
internal_static_RegionSpecifier_descriptor =
getDescriptor().getMessageTypes().get(5);
internal_static_RegionSpecifier_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegionSpecifier_descriptor,
@ -14220,7 +14828,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder.class);
internal_static_RegionLoad_descriptor =
getDescriptor().getMessageTypes().get(5);
getDescriptor().getMessageTypes().get(6);
internal_static_RegionLoad_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegionLoad_descriptor,
@ -14228,7 +14836,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.Builder.class);
internal_static_ServerLoad_descriptor =
getDescriptor().getMessageTypes().get(6);
getDescriptor().getMessageTypes().get(7);
internal_static_ServerLoad_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ServerLoad_descriptor,
@ -14236,7 +14844,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder.class);
internal_static_TimeRange_descriptor =
getDescriptor().getMessageTypes().get(7);
getDescriptor().getMessageTypes().get(8);
internal_static_TimeRange_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_TimeRange_descriptor,
@ -14244,7 +14852,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder.class);
internal_static_Filter_descriptor =
getDescriptor().getMessageTypes().get(8);
getDescriptor().getMessageTypes().get(9);
internal_static_Filter_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_Filter_descriptor,
@ -14252,7 +14860,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder.class);
internal_static_KeyValue_descriptor =
getDescriptor().getMessageTypes().get(9);
getDescriptor().getMessageTypes().get(10);
internal_static_KeyValue_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_KeyValue_descriptor,
@ -14260,7 +14868,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue.Builder.class);
internal_static_ServerName_descriptor =
getDescriptor().getMessageTypes().get(10);
getDescriptor().getMessageTypes().get(11);
internal_static_ServerName_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ServerName_descriptor,
@ -14268,7 +14876,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder.class);
internal_static_Coprocessor_descriptor =
getDescriptor().getMessageTypes().get(11);
getDescriptor().getMessageTypes().get(12);
internal_static_Coprocessor_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_Coprocessor_descriptor,
@ -14276,7 +14884,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder.class);
internal_static_NameStringPair_descriptor =
getDescriptor().getMessageTypes().get(12);
getDescriptor().getMessageTypes().get(13);
internal_static_NameStringPair_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_NameStringPair_descriptor,
@ -14284,7 +14892,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder.class);
internal_static_NameBytesPair_descriptor =
getDescriptor().getMessageTypes().get(13);
getDescriptor().getMessageTypes().get(14);
internal_static_NameBytesPair_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_NameBytesPair_descriptor,
@ -14292,7 +14900,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.Builder.class);
internal_static_BytesBytesPair_descriptor =
getDescriptor().getMessageTypes().get(14);
getDescriptor().getMessageTypes().get(15);
internal_static_BytesBytesPair_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_BytesBytesPair_descriptor,
@ -14300,7 +14908,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder.class);
internal_static_NameInt64Pair_descriptor =
getDescriptor().getMessageTypes().get(15);
getDescriptor().getMessageTypes().get(16);
internal_static_NameInt64Pair_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_NameInt64Pair_descriptor,
@ -14308,7 +14916,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.Builder.class);
internal_static_SnapshotDescription_descriptor =
getDescriptor().getMessageTypes().get(16);
getDescriptor().getMessageTypes().get(17);
internal_static_SnapshotDescription_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_SnapshotDescription_descriptor,
@ -14316,7 +14924,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder.class);
internal_static_EmptyMsg_descriptor =
getDescriptor().getMessageTypes().get(17);
getDescriptor().getMessageTypes().get(18);
internal_static_EmptyMsg_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_EmptyMsg_descriptor,
@ -14324,7 +14932,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg.Builder.class);
internal_static_LongMsg_descriptor =
getDescriptor().getMessageTypes().get(18);
getDescriptor().getMessageTypes().get(19);
internal_static_LongMsg_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_LongMsg_descriptor,
@ -14332,7 +14940,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg.Builder.class);
internal_static_BigDecimalMsg_descriptor =
getDescriptor().getMessageTypes().get(19);
getDescriptor().getMessageTypes().get(20);
internal_static_BigDecimalMsg_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_BigDecimalMsg_descriptor,
@ -14340,7 +14948,7 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg.Builder.class);
internal_static_UUID_descriptor =
getDescriptor().getMessageTypes().get(20);
getDescriptor().getMessageTypes().get(21);
internal_static_UUID_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_UUID_descriptor,

View File

@ -70,6 +70,7 @@ message OpenRegionRequest {
message RegionOpenInfo {
required RegionInfo region = 1;
optional uint32 versionOfOfflineNode = 2;
repeated ServerName favoredNodes = 3;
}
}

View File

@ -83,6 +83,13 @@ message RegionInfo {
optional bool split = 6;
}
/**
* Protocol buffer for favored nodes
*/
message FavoredNodes {
repeated ServerName favoredNode = 1;
}
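As an illustration of how this new message travels on the wire, here is a minimal Java sketch of building and parsing it with the generated HBaseProtos classes (it mirrors what FavoredNodeAssignmentHelper does later in this change); the host name and port below are made up:

// Sketch only: round-trip a favored-node list through the generated FavoredNodes message.
HBaseProtos.FavoredNodes.Builder builder = HBaseProtos.FavoredNodes.newBuilder();
builder.addFavoredNode(HBaseProtos.ServerName.newBuilder()
    .setHostName("host1.example.com").setPort(60020).setStartCode(1L).build());
byte[] bytes = builder.build().toByteArray();   // this is what ends up in the META "fn" column
HBaseProtos.FavoredNodes parsed = HBaseProtos.FavoredNodes.parseFrom(bytes);
for (HBaseProtos.ServerName sn : parsed.getFavoredNodeList()) {
  System.out.println(sn.getHostName() + ":" + sn.getPort());
}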
/**
* Container protocol buffer to specify a region.
* You can specify region by region name, or the hash

View File

@ -133,7 +133,7 @@ public class MetaEditor {
* @param ps Put to add to .META.
* @throws IOException
*/
static void putsToMetaTable(final CatalogTracker ct, final List<Put> ps)
public static void putsToMetaTable(final CatalogTracker ct, final List<Put> ps)
throws IOException {
HTable t = MetaReader.getMetaHTable(ct);
try {

View File

@ -62,6 +62,8 @@ import org.apache.hadoop.hbase.exceptions.TableNotFoundException;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper;
import org.apache.hadoop.hbase.master.balancer.FavoredNodeLoadBalancer;
import org.apache.hadoop.hbase.master.handler.ClosedRegionHandler;
import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
@ -73,6 +75,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.KeyLocker;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Triple;
import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKTable;
@ -106,6 +109,8 @@ public class AssignmentManager extends ZooKeeperListener {
private ServerManager serverManager;
private boolean shouldAssignRegionsWithFavoredNodes;
private CatalogTracker catalogTracker;
protected final TimeoutMonitor timeoutMonitor;
@ -218,6 +223,10 @@ public class AssignmentManager extends ZooKeeperListener {
this.regionsToReopen = Collections.synchronizedMap
(new HashMap<String, HRegionInfo> ());
Configuration conf = server.getConfiguration();
// Only read favored nodes if using the favored nodes load balancer.
this.shouldAssignRegionsWithFavoredNodes = conf.getClass(
HConstants.HBASE_MASTER_LOADBALANCER_CLASS, Object.class).equals(
FavoredNodeLoadBalancer.class);
this.tomActivated = conf.getBoolean("hbase.assignment.timeout.management", false);
if (tomActivated){
this.serversInUpdatingTimer = new ConcurrentSkipListSet<ServerName>();
@ -971,6 +980,24 @@ public class AssignmentManager extends ZooKeeperListener {
return false;
}
// TODO: processFavoredNodes might throw an exception, for e.g., if the
// meta could not be contacted/updated. We need to decide how seriously to treat
// this problem. Should we fail the current assignment? We should be able
// to recover from this problem eventually (if the meta couldn't be updated,
// things should work normally and eventually get fixed up).
void processFavoredNodes(List<HRegionInfo> regions) throws IOException {
if (!shouldAssignRegionsWithFavoredNodes) return;
// The AM gets the favored nodes info for each region and updates the meta
// table with that info
Map<HRegionInfo, List<ServerName>> regionToFavoredNodes =
new HashMap<HRegionInfo, List<ServerName>>();
for (HRegionInfo region : regions) {
regionToFavoredNodes.put(region,
((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region));
}
FavoredNodeAssignmentHelper.updateMetaWithFavoredNodesInfo(regionToFavoredNodes, catalogTracker);
}
/**
* If the passed regionState is in PENDING_CLOSE, clean up PENDING_CLOSE
* state and convert it to SPLITTING instead.
@ -1495,8 +1522,8 @@ public class AssignmentManager extends ZooKeeperListener {
// that unnecessary timeout on RIT is reduced.
this.addPlans(plans);
List<Pair<HRegionInfo, Integer>> regionOpenInfos =
new ArrayList<Pair<HRegionInfo, Integer>>(states.size());
List<Triple<HRegionInfo, Integer, List<ServerName>>> regionOpenInfos =
new ArrayList<Triple<HRegionInfo, Integer, List<ServerName>>>(states.size());
for (RegionState state: states) {
HRegionInfo region = state.getRegion();
String encodedRegionName = region.getEncodedName();
@ -1509,8 +1536,12 @@ public class AssignmentManager extends ZooKeeperListener {
} else {
regionStates.updateRegionState(region,
RegionState.State.PENDING_OPEN, destination);
regionOpenInfos.add(new Pair<HRegionInfo, Integer>(
region, nodeVersion));
List<ServerName> favoredNodes = ServerName.EMPTY_SERVER_LIST;
if (this.shouldAssignRegionsWithFavoredNodes) {
favoredNodes = ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region);
}
regionOpenInfos.add(new Triple<HRegionInfo, Integer, List<ServerName>>(
region, nodeVersion, favoredNodes));
}
}
@ -1787,8 +1818,12 @@ public class AssignmentManager extends ZooKeeperListener {
final String assignMsg = "Failed assignment of " + region.getRegionNameAsString() +
" to " + plan.getDestination();
try {
List<ServerName> favoredNodes = ServerName.EMPTY_SERVER_LIST;
if (this.shouldAssignRegionsWithFavoredNodes) {
favoredNodes = ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region);
}
regionOpenState = serverManager.sendRegionOpen(
plan.getDestination(), region, versionOfOfflineNode);
plan.getDestination(), region, versionOfOfflineNode, favoredNodes);
if (regionOpenState == RegionOpeningState.FAILED_OPENING) {
// Failed opening this region, looping again on a new server.
@ -2027,6 +2062,15 @@ public class AssignmentManager extends ZooKeeperListener {
newPlan = true;
randomPlan = new RegionPlan(region, null,
balancer.randomAssignment(region, destServers));
if (!region.isMetaTable() && shouldAssignRegionsWithFavoredNodes) {
List<HRegionInfo> regions = new ArrayList<HRegionInfo>(1);
regions.add(region);
try {
processFavoredNodes(regions);
} catch (IOException ie) {
LOG.warn("Ignoring exception in processFavoredNodes " + ie);
}
}
this.regionPlans.put(encodedName, randomPlan);
}
}
@ -2345,6 +2389,7 @@ public class AssignmentManager extends ZooKeeperListener {
// Generate a round-robin bulk assignment plan
Map<ServerName, List<HRegionInfo>> bulkPlan
= balancer.roundRobinAssignment(regions, servers);
processFavoredNodes(regions);
assign(regions.size(), servers.size(),
"round-robin=true", bulkPlan);
@ -2402,8 +2447,14 @@ public class AssignmentManager extends ZooKeeperListener {
Set<String> disabledOrDisablingOrEnabling = ZKTable.getDisabledOrDisablingTables(watcher);
disabledOrDisablingOrEnabling.addAll(ZKTable.getEnablingTables(watcher));
// Scan META for all user regions, skipping any disabled tables
Map<HRegionInfo, ServerName> allRegions = MetaReader.fullScan(
catalogTracker, disabledOrDisablingOrEnabling, true);
Map<HRegionInfo, ServerName> allRegions = null;
if (this.shouldAssignRegionsWithFavoredNodes) {
allRegions = FavoredNodeAssignmentHelper.fullScan(
catalogTracker, disabledOrDisablingOrEnabling, true, (FavoredNodeLoadBalancer)balancer);
} else {
allRegions = MetaReader.fullScan(
catalogTracker, disabledOrDisablingOrEnabling, true);
}
if (allRegions == null || allRegions.isEmpty()) return;
// Determine what type of assignment to do on startup
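None of the favored-nodes code paths above are exercised unless the master is configured to use the FavoredNodeLoadBalancer. A minimal sketch of enabling it (the new TestRegionPlacement below does the same thing programmatically); the property name assumes HConstants.HBASE_MASTER_LOADBALANCER_CLASS resolves to hbase.master.loadbalancer.class:

// Sketch: select the favored-nodes balancer on the master.
Configuration conf = HBaseConfiguration.create();
conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
    FavoredNodeLoadBalancer.class, LoadBalancer.class);

// Or, equivalently, in hbase-site.xml (assuming the key name above):
// <property>
//   <name>hbase.master.loadbalancer.class</name>
//   <value>org.apache.hadoop.hbase.master.balancer.FavoredNodeLoadBalancer</value>
// </property>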

View File

@ -0,0 +1,69 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.ScriptBasedMapping;
/**
* Wrapper over the rack resolution utility in Hadoop, which resolves hosts to
* the racks they belong to.
*
*/
@InterfaceAudience.Private
public class RackManager {
static final Log LOG = LogFactory.getLog(RackManager.class);
public static final String UNKNOWN_RACK = "Unknown Rack";
private DNSToSwitchMapping switchMapping;
public RackManager(Configuration conf) {
switchMapping = ReflectionUtils.instantiateWithCustomCtor(
conf.getClass("hbase.util.ip.to.rack.determiner", ScriptBasedMapping.class,
DNSToSwitchMapping.class).getName(), new Class<?>[]{Configuration.class},
new Object[]{conf});
}
/**
* Get the name of the rack containing a server, according to the DNS to
* switch mapping.
* @param server the server for which to get the rack name
* @return the rack name of the server
*/
public String getRack(ServerName server) {
if (server == null) {
return UNKNOWN_RACK;
}
// just a note - switchMapping caches results (at least the implementation should unless the
// resolution is really a lightweight process)
List<String> racks = switchMapping.resolve(Arrays.asList(server.getHostname()));
if (racks != null && !racks.isEmpty()) {
return racks.get(0);
}
return UNKNOWN_RACK;
}
}
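RackManager simply delegates to whatever DNSToSwitchMapping implementation is configured under hbase.util.ip.to.rack.determiner (ScriptBasedMapping by default). A hedged usage sketch; the server name is made up and the resulting rack string depends on the cluster's topology configuration:

// Sketch: resolve the rack of a region server host.
Configuration conf = HBaseConfiguration.create();
// Optionally plug in a custom resolver (MyMapping is hypothetical):
// conf.setClass("hbase.util.ip.to.rack.determiner", MyMapping.class, DNSToSwitchMapping.class);
RackManager rackManager = new RackManager(conf);
ServerName server = new ServerName("host1.example.com", 60020, 123L);
String rack = rackManager.getRack(server);   // e.g. "/default-rack", or UNKNOWN_RACK if unresolvable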

View File

@ -61,6 +61,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Triple;
import com.google.protobuf.ServiceException;
@ -587,9 +588,10 @@ public class ServerManager {
* @param region region to open
* @param versionOfOfflineNode that needs to be present in the offline node
* when RS tries to change the state from OFFLINE to other states.
* @param favoredNodes
*/
public RegionOpeningState sendRegionOpen(final ServerName server,
HRegionInfo region, int versionOfOfflineNode)
HRegionInfo region, int versionOfOfflineNode, List<ServerName> favoredNodes)
throws IOException {
AdminService.BlockingInterface admin = getRsAdmin(server);
if (admin == null) {
@ -598,7 +600,7 @@ public class ServerManager {
return RegionOpeningState.FAILED_OPENING;
}
OpenRegionRequest request =
RequestConverter.buildOpenRegionRequest(region, versionOfOfflineNode);
RequestConverter.buildOpenRegionRequest(region, versionOfOfflineNode, favoredNodes);
try {
OpenRegionResponse response = admin.openRegion(null, request);
return ResponseConverter.getRegionOpeningState(response);
@ -617,7 +619,7 @@ public class ServerManager {
* @return a list of region opening states
*/
public List<RegionOpeningState> sendRegionOpen(ServerName server,
List<Pair<HRegionInfo, Integer>> regionOpenInfos)
List<Triple<HRegionInfo, Integer, List<ServerName>>> regionOpenInfos)
throws IOException {
AdminService.BlockingInterface admin = getRsAdmin(server);
if (admin == null) {

View File

@ -0,0 +1,444 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.balancer;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.catalog.MetaReader.Visitor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.master.RackManager;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import com.google.protobuf.InvalidProtocolBufferException;
/**
* Helper class for {@link FavoredNodeLoadBalancer} that has all the intelligence
* for racks, meta scans, etc. Instantiated by the {@link FavoredNodeLoadBalancer}
* when needed (from within calls like
* {@link FavoredNodeLoadBalancer#randomAssignment(HRegionInfo, List)}).
*
*/
@InterfaceAudience.Private
public class FavoredNodeAssignmentHelper {
private static final Log LOG = LogFactory.getLog(FavoredNodeAssignmentHelper.class);
private RackManager rackManager;
private Map<String, List<ServerName>> rackToRegionServerMap;
private List<String> uniqueRackList;
private Map<ServerName, String> regionServerToRackMap;
private Random random;
private List<ServerName> servers;
public static final byte [] FAVOREDNODES_QUALIFIER = Bytes.toBytes("fn");
public final static short FAVORED_NODES_NUM = 3;
public FavoredNodeAssignmentHelper(final List<ServerName> servers, Configuration conf) {
this.servers = servers;
this.rackManager = new RackManager(conf);
this.rackToRegionServerMap = new HashMap<String, List<ServerName>>();
this.regionServerToRackMap = new HashMap<ServerName, String>();
this.uniqueRackList = new ArrayList<String>();
this.random = new Random();
}
// For unit tests
void setRackManager(RackManager rackManager) {
this.rackManager = rackManager;
}
/**
* Perform full scan of the meta table similar to
* {@link MetaReader#fullScan(CatalogTracker, Set, boolean)} except that this is
* aware of the favored nodes
* @param catalogTracker
* @param disabledTables
* @param excludeOfflinedSplitParents
* @param balancer required because we need to let the balancer know about the
* current favored nodes from meta scan
* @return Returns a map of every region to its currently assigned server,
* according to META. If the region does not have an assignment it will have
* a null value in the map.
* @throws IOException
*/
public static Map<HRegionInfo, ServerName> fullScan(
CatalogTracker catalogTracker, final Set<String> disabledTables,
final boolean excludeOfflinedSplitParents,
FavoredNodeLoadBalancer balancer) throws IOException {
final Map<HRegionInfo, ServerName> regions =
new TreeMap<HRegionInfo, ServerName>();
final Map<HRegionInfo, ServerName[]> favoredNodesMap =
new HashMap<HRegionInfo, ServerName[]>();
Visitor v = new Visitor() {
@Override
public boolean visit(Result r) throws IOException {
if (r == null || r.isEmpty()) return true;
Pair<HRegionInfo, ServerName> region = HRegionInfo.getHRegionInfoAndServerName(r);
HRegionInfo hri = region.getFirst();
if (hri == null) return true;
if (hri.getTableNameAsString() == null) return true;
if (disabledTables.contains(
hri.getTableNameAsString())) return true;
// Are we to include split parents in the list?
if (excludeOfflinedSplitParents && hri.isSplitParent()) return true;
regions.put(hri, region.getSecond());
byte[] favoredNodes = r.getValue(HConstants.CATALOG_FAMILY,
FavoredNodeAssignmentHelper.FAVOREDNODES_QUALIFIER);
if (favoredNodes != null) {
ServerName[] favoredServerList =
FavoredNodeAssignmentHelper.getFavoredNodesList(favoredNodes);
favoredNodesMap.put(hri, favoredServerList);
}
return true;
}
};
MetaReader.fullScan(catalogTracker, v);
balancer.noteFavoredNodes(favoredNodesMap);
return regions;
}
public static void updateMetaWithFavoredNodesInfo(
Map<HRegionInfo, List<ServerName>> regionToFavoredNodes,
CatalogTracker catalogTracker) throws IOException {
List<Put> puts = new ArrayList<Put>();
for (Map.Entry<HRegionInfo, List<ServerName>> entry : regionToFavoredNodes.entrySet()) {
Put put = makePutFromRegionInfo(entry.getKey(), entry.getValue());
if (put != null) {
puts.add(put);
}
}
MetaEditor.putsToMetaTable(catalogTracker, puts);
LOG.info("Added " + puts.size() + " regions in META");
}
/**
* Generates and returns a Put containing the region info for the catalog table
* and the servers
* @param regionInfo
* @param favoredNodeList
* @return Put object
*/
static Put makePutFromRegionInfo(HRegionInfo regionInfo, List<ServerName>favoredNodeList)
throws IOException {
Put put = null;
if (favoredNodeList != null) {
put = MetaEditor.makePutFromRegionInfo(regionInfo);
byte[] favoredNodes = getFavoredNodes(favoredNodeList);
put.add(HConstants.CATALOG_FAMILY, FAVOREDNODES_QUALIFIER,
EnvironmentEdgeManager.currentTimeMillis(), favoredNodes);
LOG.info("Create the region " + regionInfo.getRegionNameAsString() +
" with favored nodes " + favoredNodes);
}
return put;
}
/**
* @param favoredNodes The PB'ed bytes of favored nodes
* @return the array of {@link ServerName} for the byte array of favored nodes.
* @throws InvalidProtocolBufferException
*/
public static ServerName[] getFavoredNodesList(byte[] favoredNodes)
throws InvalidProtocolBufferException {
FavoredNodes f = FavoredNodes.parseFrom(favoredNodes);
List<HBaseProtos.ServerName> protoNodes = f.getFavoredNodeList();
ServerName[] servers = new ServerName[protoNodes.size()];
int i = 0;
for (HBaseProtos.ServerName node : protoNodes) {
servers[i++] = ProtobufUtil.toServerName(node);
}
return servers;
}
/**
* @param serverAddrList
* @return PB'ed bytes of {@link FavoredNodes} generated by the server list.
*/
static byte[] getFavoredNodes(List<ServerName> serverAddrList) {
FavoredNodes.Builder f = FavoredNodes.newBuilder();
for (ServerName s : serverAddrList) {
HBaseProtos.ServerName.Builder b = HBaseProtos.ServerName.newBuilder();
b.setHostName(s.getHostname());
b.setPort(s.getPort());
b.setStartCode(s.getStartcode());
f.addFavoredNode(b.build());
}
return f.build().toByteArray();
}
// Place the regions round-robin across the racks picking one server from each
// rack at a time. For example, with 2 racks (r1 and r2) of 8 servers (s1..s8) each, it will
// choose s1 from r1, s1 from r2, s2 from r1, s2 from r2, ...
void placePrimaryRSAsRoundRobin(Map<ServerName, List<HRegionInfo>> assignmentMap,
Map<HRegionInfo, ServerName> primaryRSMap, List<HRegionInfo> regions) {
List<String> rackList = new ArrayList<String>(rackToRegionServerMap.size());
rackList.addAll(rackToRegionServerMap.keySet());
Map<String, Integer> currentProcessIndexMap = new HashMap<String, Integer>();
int rackIndex = 0;
for (HRegionInfo regionInfo : regions) {
String rackName = rackList.get(rackIndex);
// Initialize the current processing host index.
int serverIndex = 0;
// Restore the current process index from the currentProcessIndexMap
Integer currentProcessIndex = currentProcessIndexMap.get(rackName);
if (currentProcessIndex != null) {
serverIndex = currentProcessIndex.intValue();
}
// Get the server list for the current rack
List<ServerName> currentServerList = rackToRegionServerMap.get(rackName);
// Get the current process region server
ServerName currentServer = currentServerList.get(serverIndex);
// Place the current region with the current primary region server
primaryRSMap.put(regionInfo, currentServer);
List<HRegionInfo> regionsForServer = assignmentMap.get(currentServer);
if (regionsForServer == null) {
regionsForServer = new ArrayList<HRegionInfo>();
assignmentMap.put(currentServer, regionsForServer);
}
regionsForServer.add(regionInfo);
// Set the next processing index
if ((++serverIndex) >= currentServerList.size()) {
// Reset the server index for the current rack
serverIndex = 0;
}
// Keep track of the next processing index
currentProcessIndexMap.put(rackName, serverIndex);
if ((++rackIndex) >= rackList.size()) {
rackIndex = 0; // reset the rack index to 0
}
}
}
Map<HRegionInfo, ServerName[]> placeSecondaryAndTertiaryRS(
Map<HRegionInfo, ServerName> primaryRSMap) {
Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
new HashMap<HRegionInfo, ServerName[]>();
for (Map.Entry<HRegionInfo, ServerName> entry : primaryRSMap.entrySet()) {
// Get the target region and its primary region server rack
HRegionInfo regionInfo = entry.getKey();
ServerName primaryRS = entry.getValue();
try {
// Create the secondary and tertiary region server pair object.
ServerName[] favoredNodes;
// Get the rack for the primary region server
String primaryRack = rackManager.getRack(primaryRS);
if (getTotalNumberOfRacks() == 1) {
favoredNodes = singleRackCase(regionInfo, primaryRS, primaryRack);
} else {
favoredNodes = multiRackCase(regionInfo, primaryRS, primaryRack);
}
if (favoredNodes != null) {
secondaryAndTertiaryMap.put(regionInfo, favoredNodes);
LOG.debug("Place the secondary and tertiary region server for region "
+ regionInfo.getRegionNameAsString());
}
} catch (Exception e) {
LOG.warn("Cannot place the favored nodes for region " +
regionInfo.getRegionNameAsString() + " because " + e);
continue;
}
}
return secondaryAndTertiaryMap;
}
private ServerName[] singleRackCase(HRegionInfo regionInfo,
ServerName primaryRS,
String primaryRack) throws IOException {
// Single rack case: have to pick the secondary and tertiary
// from the same rack
List<ServerName> serverList = getServersFromRack(primaryRack);
if (serverList.size() <= 2) {
// Fewer than three region servers in the rack: cannot place the favored nodes
// on distinct servers within this rack
return null;
} else {
// Randomly select two region servers from the server list and make sure
// they do not overlap with the primary region server;
Set<ServerName> serverSkipSet = new HashSet<ServerName>();
serverSkipSet.add(primaryRS);
// Place the secondary RS
ServerName secondaryRS = getOneRandomServer(primaryRack, serverSkipSet);
// Skip the secondary for the tertiary placement
serverSkipSet.add(secondaryRS);
// Place the tertiary RS
ServerName tertiaryRS =
getOneRandomServer(primaryRack, serverSkipSet);
if (secondaryRS == null || tertiaryRS == null) {
LOG.error("Cannot place the secondary and terinary" +
"region server for region " +
regionInfo.getRegionNameAsString());
}
// Create the secondary and tertiary pair
ServerName[] favoredNodes = new ServerName[2];
favoredNodes[0] = secondaryRS;
favoredNodes[1] = tertiaryRS;
return favoredNodes;
}
}
private ServerName[] multiRackCase(HRegionInfo regionInfo,
ServerName primaryRS,
String primaryRack) throws IOException {
// Randomly choose a rack other than the primary rack, and then pick the
// secondary and tertiary region servers from that rack
Set<String> rackSkipSet = new HashSet<String>();
rackSkipSet.add(primaryRack);
ServerName[] favoredNodes = new ServerName[2];
String secondaryRack = getOneRandomRack(rackSkipSet);
List<ServerName> serverList = getServersFromRack(secondaryRack);
if (serverList.size() >= 2) {
// Randomly pick up two servers from this secondary rack
// Place the secondary RS
ServerName secondaryRS = getOneRandomServer(secondaryRack);
// Skip the secondary for the tertiary placement
Set<ServerName> skipServerSet = new HashSet<ServerName>();
skipServerSet.add(secondaryRS);
// Place the tertiary RS
ServerName tertiaryRS = getOneRandomServer(secondaryRack, skipServerSet);
if (secondaryRS == null || tertiaryRS == null) {
LOG.error("Cannot place the secondary and terinary" +
"region server for region " +
regionInfo.getRegionNameAsString());
}
// Create the secondary and tertiary pair
favoredNodes[0] = secondaryRS;
favoredNodes[1] = tertiaryRS;
} else {
// Pick the secondary rs from this secondary rack
// and pick the tertiary from another random rack
favoredNodes[0] = getOneRandomServer(secondaryRack);
// Pick the tertiary
if (getTotalNumberOfRacks() == 2) {
// Pick the tertiary from the same rack of the primary RS
Set<ServerName> serverSkipSet = new HashSet<ServerName>();
serverSkipSet.add(primaryRS);
favoredNodes[1] = getOneRandomServer(primaryRack, serverSkipSet);
} else {
// Pick the tertiary from another rack
rackSkipSet.add(secondaryRack);
String tertiaryRandomRack = getOneRandomRack(rackSkipSet);
favoredNodes[1] = getOneRandomServer(tertiaryRandomRack);
}
}
return favoredNodes;
}
boolean canPlaceFavoredNodes() {
int serverSize = this.regionServerToRackMap.size();
return (serverSize >= FAVORED_NODES_NUM);
}
void initialize() {
for (ServerName sn : this.servers) {
String rackName = this.rackManager.getRack(sn);
List<ServerName> serverList = this.rackToRegionServerMap.get(rackName);
if (serverList == null) {
serverList = new ArrayList<ServerName>();
// Add the current rack to the unique rack list
this.uniqueRackList.add(rackName);
}
if (!serverList.contains(sn)) {
serverList.add(sn);
this.rackToRegionServerMap.put(rackName, serverList);
this.regionServerToRackMap.put(sn, rackName);
}
}
}
private int getTotalNumberOfRacks() {
return this.uniqueRackList.size();
}
private List<ServerName> getServersFromRack(String rack) {
return this.rackToRegionServerMap.get(rack);
}
private ServerName getOneRandomServer(String rack,
Set<ServerName> skipServerSet) throws IOException {
if(rack == null) return null;
List<ServerName> serverList = this.rackToRegionServerMap.get(rack);
if (serverList == null) return null;
// Get a random server except for any servers from the skip set
if (skipServerSet != null && serverList.size() <= skipServerSet.size()) {
throw new IOException("Cannot randomly pick another random server");
}
ServerName randomServer;
do {
int randomIndex = random.nextInt(serverList.size());
randomServer = serverList.get(randomIndex);
} while (skipServerSet != null && skipServerSet.contains(randomServer));
return randomServer;
}
private ServerName getOneRandomServer(String rack) throws IOException {
return this.getOneRandomServer(rack, null);
}
private String getOneRandomRack(Set<String> skipRackSet) throws IOException {
if (skipRackSet == null || uniqueRackList.size() <= skipRackSet.size()) {
throw new IOException("Cannot randomly pick another random server");
}
String randomRack;
do {
int randomIndex = random.nextInt(this.uniqueRackList.size());
randomRack = this.uniqueRackList.get(randomIndex);
} while (skipRackSet.contains(randomRack));
return randomRack;
}
}
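To make the placement flow concrete, here is a hedged sketch of driving the helper directly, much as the new TestFavoredNodeAssignmentHelper at the end of this change does with a mocked RackManager; conf and regions are assumed to be in scope and the server names are made up:

// Sketch: primaries round-robin across racks, then secondaries and tertiaries.
List<ServerName> servers = new ArrayList<ServerName>();
for (int i = 0; i < 6; i++) {
  servers.add(new ServerName("host" + i + ":60020", -1));
}
FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, conf);
helper.initialize();                    // builds the rack -> region server maps
if (helper.canPlaceFavoredNodes()) {    // needs at least FAVORED_NODES_NUM servers
  Map<ServerName, List<HRegionInfo>> assignmentMap = new HashMap<ServerName, List<HRegionInfo>>();
  Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<HRegionInfo, ServerName>();
  helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
  Map<HRegionInfo, ServerName[]> secondariesAndTertiaries =
      helper.placeSecondaryAndTertiaryRS(primaryRSMap);
}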

View File

@ -0,0 +1,155 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.balancer;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.RegionPlan;
/**
* An implementation of the {@link LoadBalancer} that assigns favored nodes for
* each region. There is a Primary RegionServer that hosts the region, and then
* there is Secondary and Tertiary RegionServers. Currently, the favored nodes
* information is used in creating HDFS files - the Primary RegionServer passes
* the primary, secondary, tertiary node addresses as hints to the DistributedFileSystem
* API for creating files on the filesystem. These nodes are treated as hints by
* the HDFS to place the blocks of the file. This alleviates the problem of
* reading from remote nodes (since we can make the Secondary RegionServer the new
* Primary RegionServer) after a region is recovered. This should help provide consistent
* read latencies for the regions even when their primary region servers die.
*
*/
@InterfaceAudience.Private
public class FavoredNodeLoadBalancer extends BaseLoadBalancer {
private static final Log LOG = LogFactory.getLog(FavoredNodeLoadBalancer.class);
private FavoredNodes globalFavoredNodesAssignmentPlan;
private Configuration configuration;
@Override
public void setConf(Configuration conf) {
this.configuration = conf;
globalFavoredNodesAssignmentPlan = new FavoredNodes();
}
@Override
public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState) {
//TODO. At a high level, this should look at the block locality per region, and
//then reassign regions based on which nodes have the most blocks of the region
//file(s). There could be different ways like minimizing region movement, or maximizing
//locality, etc. The other dimension to look at is whether the Stochastic loadbalancer
//can be integrated with this
throw new UnsupportedOperationException("Not implemented yet");
}
@Override
public Map<ServerName, List<HRegionInfo>> roundRobinAssignment(List<HRegionInfo> regions,
List<ServerName> servers) {
Map<ServerName, List<HRegionInfo>> assignmentMap;
try {
FavoredNodeAssignmentHelper assignmentHelper =
new FavoredNodeAssignmentHelper(servers, configuration);
assignmentHelper.initialize();
if (!assignmentHelper.canPlaceFavoredNodes()) {
return super.roundRobinAssignment(regions, servers);
}
assignmentMap = new HashMap<ServerName, List<HRegionInfo>>();
roundRobinAssignmentImpl(assignmentHelper, assignmentMap, regions, servers);
} catch (Exception ex) {
LOG.warn("Encountered exception while doing favored-nodes assignment " + ex +
" Falling back to regular assignment");
assignmentMap = super.roundRobinAssignment(regions, servers);
}
return assignmentMap;
}
@Override
public ServerName randomAssignment(HRegionInfo regionInfo, List<ServerName> servers) {
try {
FavoredNodeAssignmentHelper assignmentHelper =
new FavoredNodeAssignmentHelper(servers, configuration);
assignmentHelper.initialize();
ServerName primary = super.randomAssignment(regionInfo, servers);
if (!assignmentHelper.canPlaceFavoredNodes()) {
return primary;
}
List<HRegionInfo> regions = new ArrayList<HRegionInfo>(1);
regions.add(regionInfo);
Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<HRegionInfo, ServerName>(1);
primaryRSMap.put(regionInfo, primary);
assignSecondaryAndTertiaryNodesForRegion(assignmentHelper, regions, primaryRSMap);
return primary;
} catch (Exception ex) {
LOG.warn("Encountered exception while doing favored-nodes (random)assignment " + ex +
" Falling back to regular assignment");
return super.randomAssignment(regionInfo, servers);
}
}
public List<ServerName> getFavoredNodes(HRegionInfo regionInfo) {
return this.globalFavoredNodesAssignmentPlan.getFavoredNodes(regionInfo);
}
private void roundRobinAssignmentImpl(FavoredNodeAssignmentHelper assignmentHelper,
Map<ServerName, List<HRegionInfo>> assignmentMap,
List<HRegionInfo> regions, List<ServerName> servers) throws IOException {
Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<HRegionInfo, ServerName>();
// figure the primary RSs
assignmentHelper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
assignSecondaryAndTertiaryNodesForRegion(assignmentHelper, regions, primaryRSMap);
}
private void assignSecondaryAndTertiaryNodesForRegion(
FavoredNodeAssignmentHelper assignmentHelper,
List<HRegionInfo> regions, Map<HRegionInfo, ServerName> primaryRSMap) {
// figure the secondary and tertiary RSs
Map<HRegionInfo, ServerName[]> secondaryAndTertiaryRSMap =
assignmentHelper.placeSecondaryAndTertiaryRS(primaryRSMap);
// now record all the assignments so that we can serve queries later
for (HRegionInfo region : regions) {
List<ServerName> favoredNodesForRegion = new ArrayList<ServerName>(3);
favoredNodesForRegion.add(primaryRSMap.get(region));
ServerName[] secondaryAndTertiaryNodes = secondaryAndTertiaryRSMap.get(region);
if (secondaryAndTertiaryNodes != null) {
favoredNodesForRegion.add(secondaryAndTertiaryNodes[0]);
favoredNodesForRegion.add(secondaryAndTertiaryNodes[1]);
}
globalFavoredNodesAssignmentPlan.updateFavoredNodesMap(region, favoredNodesForRegion);
}
}
void noteFavoredNodes(final Map<HRegionInfo, ServerName[]> favoredNodesMap) {
for (Map.Entry<HRegionInfo, ServerName[]> entry : favoredNodesMap.entrySet()) {
globalFavoredNodesAssignmentPlan.updateFavoredNodesMap(entry.getKey(),
Arrays.asList(entry.getValue()));
}
}
}
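A short usage sketch of the balancer (it mirrors the testGetFavoredNodes case added later in this change); conf, regionInfo and servers are assumed to be in scope:

// Sketch: assign a region, then read back its favored nodes.
LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(conf);  // conf selects FavoredNodeLoadBalancer
ServerName primary = balancer.randomAssignment(regionInfo, servers);
List<ServerName> favored = ((FavoredNodeLoadBalancer) balancer).getFavoredNodes(regionInfo);
// favored.get(0) is the primary, favored.get(1) the secondary, favored.get(2) the tertiary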

View File

@ -0,0 +1,76 @@
/**
* Copyright 2012 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.balancer;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.jboss.netty.util.internal.ConcurrentHashMap;
/**
* This class contains the mapping information between each region and
* its favored region server list. Used by the {@link FavoredNodeLoadBalancer} set
* of classes and by unit tests (hence the class is public).
*
* All the access to this class is thread-safe.
*/
@InterfaceAudience.Private
public class FavoredNodes {
protected static final Log LOG = LogFactory.getLog(
FavoredNodes.class.getName());
/** the map between each region and its favored region server list */
private Map<HRegionInfo, List<ServerName>> favoredNodesMap;
public static enum Position {
PRIMARY,
SECONDARY,
TERTIARY;
};
public FavoredNodes() {
favoredNodesMap = new ConcurrentHashMap<HRegionInfo, List<ServerName>>();
}
/**
* Add an assignment to the plan
* @param region
* @param servers
*/
public synchronized void updateFavoredNodesMap(HRegionInfo region,
List<ServerName> servers) {
if (region == null || servers == null || servers.size() == 0)
return;
this.favoredNodesMap.put(region, servers);
}
/**
* @param region
* @return the list of favored region server for this region based on the plan
*/
public synchronized List<ServerName> getFavoredNodes(HRegionInfo region) {
return favoredNodesMap.get(region);
}
}
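The order of the servers handed to updateFavoredNodesMap() is what gives the Position enum its meaning, which is how TestRegionPlacement below indexes the plan. A small sketch with made-up server names; region is assumed to be in scope:

// Sketch: the list order is [primary, secondary, tertiary]; Position ordinals index it.
FavoredNodes plan = new FavoredNodes();
plan.updateFavoredNodesMap(region, Arrays.asList(
    new ServerName("primary-host:60020", -1),
    new ServerName("secondary-host:60020", -1),
    new ServerName("tertiary-host:60020", -1)));
ServerName secondary = plan.getFavoredNodes(region).get(Position.SECONDARY.ordinal());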

View File

@ -30,7 +30,7 @@ import org.apache.hadoop.util.ReflectionUtils;
public class LoadBalancerFactory {
/**
* Create a loadblanacer from the given conf.
* Create a loadbalancer from the given conf.
* @param conf
* @return A {@link LoadBalancer}
*/

View File

@ -280,6 +280,18 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
protected final Map<String, HRegion> onlineRegions =
new ConcurrentHashMap<String, HRegion>();
/**
* Map of encoded region names to the DataNode locations they should be hosted on
* We store the value as InetSocketAddress since this is used only in HDFS
* API (create() that takes favored nodes as hints for placing file blocks).
* We could have used ServerName here as the value class, but we'd need to
* convert it to InetSocketAddress at some point before the HDFS API call, and
* it seems a bit weird to store ServerName since ServerName refers to RegionServers
* and here we really mean DataNode locations.
*/
protected final Map<String, InetSocketAddress[]> regionFavoredNodesMap =
new ConcurrentHashMap<String, InetSocketAddress[]>();
// Leases
protected Leases leases;
@ -2425,6 +2437,10 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
return this.onlineRegions.get(encodedRegionName);
}
public InetSocketAddress[] getRegionBlockLocations(final String encodedRegionName) {
return this.regionFavoredNodesMap.get(encodedRegionName);
}
@Override
public HRegion getFromOnlineRegions(final String encodedRegionName) {
return this.onlineRegions.get(encodedRegionName);
@ -2447,6 +2463,7 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
}
addToMovedRegions(r.getRegionInfo().getEncodedName(), destination, closeSeqNum);
}
this.regionFavoredNodesMap.remove(r.getRegionInfo().getEncodedName());
return toReturn != null;
}
@ -3438,6 +3455,8 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
this.service.submit(new OpenMetaHandler(this, this, region, htd,
versionOfOfflineNode));
} else {
updateRegionFavoredNodesMapping(region.getEncodedName(),
regionOpenInfo.getFavoredNodesList());
this.service.submit(new OpenRegionHandler(this, this, region, htd,
versionOfOfflineNode));
}
@ -3458,6 +3477,28 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
return builder.build();
}
private void updateRegionFavoredNodesMapping(String encodedRegionName,
List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> favoredNodes) {
InetSocketAddress[] addr = new InetSocketAddress[favoredNodes.size()];
// Refer to the comment on the declaration of regionFavoredNodesMap on why
// it is a map of region name to InetSocketAddress[]
for (int i = 0; i < favoredNodes.size(); i++) {
addr[i] = InetSocketAddress.createUnresolved(favoredNodes.get(i).getHostName(),
favoredNodes.get(i).getPort());
}
regionFavoredNodesMap.put(encodedRegionName, addr);
}
/**
* Return the favored nodes for a region given its encoded name. Look at the
* comment around {@link #regionFavoredNodesMap} on why it is InetSocketAddress[]
* @param encodedRegionName
* @return array of favored locations
*/
public InetSocketAddress[] getFavoredNodesForRegion(String encodedRegionName) {
return regionFavoredNodesMap.get(encodedRegionName);
}
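The InetSocketAddress hints recorded here are meant to be handed to HDFS when the region server creates store files; this patch only records the mapping, so the following is just a hedged sketch of that hand-off, assuming the DistributedFileSystem#create overload that accepts favoredNodes (available in newer Hadoop 2 releases); path, conf and encodedRegionName are assumed to be in scope:

// Sketch only: use the recorded favored nodes as block placement hints for a new store file.
InetSocketAddress[] favoredNodes = getFavoredNodesForRegion(encodedRegionName);
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
FSDataOutputStream out = dfs.create(path, FsPermission.getDefault(),
    true,                                      // overwrite
    conf.getInt("io.file.buffer.size", 4096),  // buffer size
    dfs.getDefaultReplication(path),
    dfs.getDefaultBlockSize(path),
    null,                                      // no progress callback
    favoredNodes);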
/**
* Close a region on the region server.
*

View File

@ -159,9 +159,9 @@ public class TestAssignmentManager {
Mockito.when(this.serverManager.sendRegionClose(SERVERNAME_B, REGIONINFO, -1)).
thenReturn(true);
// Ditto on open.
Mockito.when(this.serverManager.sendRegionOpen(SERVERNAME_A, REGIONINFO, -1)).
Mockito.when(this.serverManager.sendRegionOpen(SERVERNAME_A, REGIONINFO, -1, null)).
thenReturn(RegionOpeningState.OPENED);
Mockito.when(this.serverManager.sendRegionOpen(SERVERNAME_B, REGIONINFO, -1)).
Mockito.when(this.serverManager.sendRegionOpen(SERVERNAME_B, REGIONINFO, -1, null)).
thenReturn(RegionOpeningState.OPENED);
this.master = Mockito.mock(HMaster.class);

View File

@ -188,7 +188,7 @@ public class TestMasterNoCluster {
// Fake a successful open.
Mockito.doReturn(RegionOpeningState.OPENED).when(spy).
sendRegionOpen((ServerName)Mockito.any(), (HRegionInfo)Mockito.any(),
Mockito.anyInt());
Mockito.anyInt(), Mockito.anyListOf(ServerName.class));
return spy;
}
@ -274,7 +274,7 @@ public class TestMasterNoCluster {
// Fake a successful open.
Mockito.doReturn(RegionOpeningState.OPENED).when(spy).
sendRegionOpen((ServerName)Mockito.any(), (HRegionInfo)Mockito.any(),
Mockito.anyInt());
Mockito.anyInt(), Mockito.anyListOf(ServerName.class));
return spy;
}

View File

@ -0,0 +1,287 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.MetaScanner;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper;
import org.apache.hadoop.hbase.master.balancer.FavoredNodeLoadBalancer;
import org.apache.hadoop.hbase.master.balancer.FavoredNodes.Position;
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@Category(MediumTests.class)
public class TestRegionPlacement {
final static Log LOG = LogFactory.getLog(TestRegionPlacement.class);
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final static int SLAVES = 4;
private static HBaseAdmin admin;
private static Position[] positions = Position.values();
private int REGION_NUM = 10;
private Map<HRegionInfo, ServerName[]> favoredNodesAssignmentPlan =
new HashMap<HRegionInfo, ServerName[]>();
@BeforeClass
public static void setupBeforeClass() throws Exception {
Configuration conf = TEST_UTIL.getConfiguration();
// Enable the favored nodes based load balancer
conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
FavoredNodeLoadBalancer.class, LoadBalancer.class);
TEST_UTIL.startMiniCluster(SLAVES);
admin = new HBaseAdmin(conf);
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
@Test
public void testGetFavoredNodes() {
LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration());
HRegionInfo regionInfo = new HRegionInfo("oneregion".getBytes());
List<ServerName> servers = new ArrayList<ServerName>();
for (int i = 0; i < 10; i++) {
ServerName server = new ServerName("foo"+i+":1234",-1);
servers.add(server);
}
// test that we have enough favored nodes after we call randomAssignment
balancer.randomAssignment(regionInfo, servers);
assertTrue(((FavoredNodeLoadBalancer)balancer).getFavoredNodes(regionInfo).size() == 3);
List<HRegionInfo> regions = new ArrayList<HRegionInfo>(100);
for (int i = 0; i < 100; i++) {
HRegionInfo region = new HRegionInfo(("foobar"+i).getBytes());
regions.add(region);
}
// test that we have enough favored nodes after we call roundRobinAssignment
balancer.roundRobinAssignment(regions, servers);
for (int i = 0; i < 100; i++) {
assertTrue(((FavoredNodeLoadBalancer)balancer).getFavoredNodes(regions.get(i)).size() == 3);
}
}
@Test(timeout = 180000)
public void testRegionPlacement() throws Exception {
// Create a table with REGION_NUM regions.
createTable("testRegionAssignment", REGION_NUM);
TEST_UTIL.waitTableAvailable(Bytes.toBytes("testRegionAssignment"));
// Verify all the user regions are assigned to the primary region server
// based on the plan
countRegionOnPrimaryRS(REGION_NUM);
// Verify all the region servers are updated with the latest favored nodes
verifyRegionServerUpdated();
}
/**
* Verify that the expected number of user regions are assigned to the
* primary region server based on the plan
* @param expectedNum
* @throws IOException
*/
private void countRegionOnPrimaryRS(int expectedNum)
throws IOException {
int lastRegionOnPrimaryRSCount = getNumRegionsOnPrimaryRS();
assertEquals("Expected " + expectedNum + " user regions to be running " +
"on the primary region server", expectedNum,
lastRegionOnPrimaryRSCount);
}
/**
* Verify that all the online region servers have been updated with the
* latest assignment plan
* @throws IOException
*/
private void verifyRegionServerUpdated() throws IOException {
// Verify all region servers contain the correct favored nodes information
MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
for (int i = 0; i < SLAVES; i++) {
HRegionServer rs = cluster.getRegionServer(i);
for (HRegion region: rs.getOnlineRegions(Bytes.toBytes("testRegionAssignment"))) {
InetSocketAddress[] favoredSocketAddress = rs.getFavoredNodesForRegion(
region.getRegionInfo().getEncodedName());
ServerName[] favoredServerList = favoredNodesAssignmentPlan.get(region.getRegionInfo());
// All regions are supposed to have favored nodes,
// except for META and ROOT
if (favoredServerList == null) {
HTableDescriptor desc = region.getTableDesc();
// Verify they are ROOT and META regions since no favored nodes
assertNull(favoredSocketAddress);
assertTrue("User region " +
region.getTableDesc().getNameAsString() +
" should have favored nodes",
(desc.isRootRegion() || desc.isMetaRegion()));
} else {
// For user region, the favored nodes in the region server should be
// identical to favored nodes in the assignmentPlan
assertTrue(favoredSocketAddress.length == favoredServerList.length);
assertTrue(favoredServerList.length > 0);
for (int j = 0; j < favoredServerList.length; j++) {
InetSocketAddress addrFromRS = favoredSocketAddress[j];
InetSocketAddress addrFromPlan = InetSocketAddress.createUnresolved(
favoredServerList[j].getHostname(), favoredServerList[j].getPort());
assertNotNull(addrFromRS);
assertNotNull(addrFromPlan);
assertTrue("Region server " + rs.getServerName().getHostAndPort()
+ " has the " + positions[j] +
" for region " + region.getRegionNameAsString() + " is " +
addrFromRS + " which is inconsistent with the plan "
+ addrFromPlan, addrFromRS.equals(addrFromPlan));
}
}
}
}
}
/**
* Check whether regions are assigned to servers consistent with the explicit
* hints that are persisted in the META table.
* Also keep track of how many of the regions are assigned to the
* primary region server.
* @return the number of regions assigned to the primary region server
* @throws IOException
*/
private int getNumRegionsOnPrimaryRS() throws IOException {
final AtomicInteger regionOnPrimaryNum = new AtomicInteger(0);
final AtomicInteger totalRegionNum = new AtomicInteger(0);
LOG.info("The start of region placement verification");
MetaScannerVisitor visitor = new MetaScannerVisitor() {
public boolean processRow(Result result) throws IOException {
try {
HRegionInfo info = MetaScanner.getHRegionInfo(result);
byte[] server = result.getValue(HConstants.CATALOG_FAMILY,
HConstants.SERVER_QUALIFIER);
byte[] startCode = result.getValue(HConstants.CATALOG_FAMILY,
HConstants.STARTCODE_QUALIFIER);
byte[] favoredNodes = result.getValue(HConstants.CATALOG_FAMILY,
FavoredNodeAssignmentHelper.FAVOREDNODES_QUALIFIER);
// Add the favored nodes into assignment plan
ServerName[] favoredServerList =
FavoredNodeAssignmentHelper.getFavoredNodesList(favoredNodes);
favoredNodesAssignmentPlan.put(info, favoredServerList);
Position[] positions = Position.values();
if (info != null) {
totalRegionNum.incrementAndGet();
if (server != null) {
ServerName serverName =
new ServerName(Bytes.toString(server),Bytes.toLong(startCode));
if (favoredNodes != null) {
String placement = "[NOT FAVORED NODE]";
for (int i = 0; i < favoredServerList.length; i++) {
if (favoredServerList[i].equals(serverName)) {
placement = positions[i].toString();
if (i == Position.PRIMARY.ordinal()) {
regionOnPrimaryNum.incrementAndGet();
}
break;
}
}
LOG.info(info.getRegionNameAsString() + " on " +
serverName + " " + placement);
} else {
LOG.info(info.getRegionNameAsString() + " running on " +
serverName + " but there is no favored region server");
}
} else {
LOG.info(info.getRegionNameAsString() +
" not assigned to any server");
}
}
return true;
} catch (RuntimeException e) {
LOG.error("Result=" + result);
throw e;
}
}
@Override
public void close() throws IOException {}
};
MetaScanner.metaScan(TEST_UTIL.getConfiguration(), visitor);
LOG.info("There are " + regionOnPrimaryNum.intValue() + " out of " +
totalRegionNum.intValue() + " regions running on the primary" +
" region servers" );
return regionOnPrimaryNum.intValue();
}
/**
* Create a table with the specified table name and number of regions.
* @param table
* @param regionNum
* @throws IOException
*/
private static void createTable(String table, int regionNum)
throws IOException {
byte[] tableName = Bytes.toBytes(table);
int expectedRegions = regionNum;
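// Pre-split into regionNum regions; each split key is three copies of the index byte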
byte[][] splitKeys = new byte[expectedRegions - 1][];
for (int i = 1; i < expectedRegions; i++) {
byte splitKey = (byte) i;
splitKeys[i - 1] = new byte[] { splitKey, splitKey, splitKey };
}
HTableDescriptor desc = new HTableDescriptor(tableName);
desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
admin.createTable(desc, splitKeys);
HTable ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
Map<HRegionInfo, ServerName> regions = ht.getRegionLocations();
assertEquals("Tried to create " + expectedRegions + " regions "
+ "but only found " + regions.size(), expectedRegions, regions.size());
}
}

View File

@ -0,0 +1,311 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.balancer;
import static org.junit.Assert.assertTrue;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.RackManager;
import org.apache.hadoop.hbase.util.Triple;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
@Category(MediumTests.class)
public class TestFavoredNodeAssignmentHelper {
private static List<ServerName> servers = new ArrayList<ServerName>();
private static Map<String, List<ServerName>> rackToServers = new HashMap<String,
List<ServerName>>();
private static RackManager rackManager = Mockito.mock(RackManager.class);
@BeforeClass
public static void setupBeforeClass() throws Exception {
// Set up some server -> rack mappings
// Have three racks in the cluster with 10 hosts each.
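// Note: 40 servers are created but only the first 30 are mapped to a rack (10 per rack);
// the remaining 10 only appear in the flat server list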
for (int i = 0; i < 40; i++) {
ServerName server = new ServerName("foo"+i+":1234",-1);
if (i < 10) {
Mockito.when(rackManager.getRack(server)).thenReturn("rack1");
if (rackToServers.get("rack1") == null) {
List<ServerName> servers = new ArrayList<ServerName>();
rackToServers.put("rack1", servers);
}
rackToServers.get("rack1").add(server);
}
if (i >= 10 && i < 20) {
Mockito.when(rackManager.getRack(server)).thenReturn("rack2");
if (rackToServers.get("rack2") == null) {
List<ServerName> servers = new ArrayList<ServerName>();
rackToServers.put("rack2", servers);
}
rackToServers.get("rack2").add(server);
}
if (i >= 20 && i < 30) {
Mockito.when(rackManager.getRack(server)).thenReturn("rack3");
if (rackToServers.get("rack3") == null) {
List<ServerName> servers = new ArrayList<ServerName>();
rackToServers.put("rack3", servers);
}
rackToServers.get("rack3").add(server);
}
servers.add(server);
}
}
// The tests decide which racks to work with, and how many machines to
// work with from any given rack
// Return the requested number of servers from each rack, as specified
// in the rackToServerCount map
private static List<ServerName> getServersFromRack(Map<String, Integer> rackToServerCount) {
List<ServerName> chosenServers = new ArrayList<ServerName>();
for (Map.Entry<String, Integer> entry : rackToServerCount.entrySet()) {
List<ServerName> servers = rackToServers.get(entry.getKey());
for (int i = 0; i < entry.getValue(); i++) {
chosenServers.add(servers.get(i));
}
}
return chosenServers;
}
@Test
public void testSmallCluster() {
// Test the case where we cannot assign favored nodes (because the number
// of nodes in the cluster is too small)
Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
rackToServerCount.put("rack1", 2);
List<ServerName> servers = getServersFromRack(rackToServerCount);
FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers,
new Configuration());
assertTrue(helper.canPlaceFavoredNodes() == false);
}
@Test
public void testPlacePrimaryRSAsRoundRobin() {
// Test the regular case where there are many servers in different racks
// Test once for few regions and once for many regions
primaryRSPlacement(6, null);
// now create lots of regions and try to place them on the limited number of machines
primaryRSPlacement(600, null);
}
//@Test
public void testSecondaryAndTertiaryPlacementWithSingleRack() {
// Test the case where there is a single rack and we need to choose
// Primary/Secondary/Tertiary from a single rack.
Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
rackToServerCount.put("rack1", 10);
// have lots of regions to test with
Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
helper.placeSecondaryAndTertiaryRS(primaryRSMap);
// although we created lots of regions we should have no overlap on the
// primary/secondary/tertiary for any given region
for (HRegionInfo region : regions) {
ServerName[] secondaryAndTertiaryServers = secondaryAndTertiaryMap.get(region);
assertTrue(!secondaryAndTertiaryServers[0].equals(primaryRSMap.get(region)));
assertTrue(!secondaryAndTertiaryServers[1].equals(primaryRSMap.get(region)));
assertTrue(!secondaryAndTertiaryServers[0].equals(secondaryAndTertiaryServers[1]));
}
}
@Test
public void testSecondaryAndTertiaryPlacementWithSingleServer() {
// Test the case where we have a single node in the cluster. In this case
// the primary can be assigned but the secondary/tertiary would be null
Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
rackToServerCount.put("rack1", 1);
Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount);
FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
helper.placeSecondaryAndTertiaryRS(primaryRSMap);
// no secondary/tertiary placement in case of a single RegionServer
assertTrue(secondaryAndTertiaryMap.get(regions.get(0)) == null);
}
@Test
public void testSecondaryAndTertiaryPlacementWithMultipleRacks() {
// Test the case where we have multiple racks and the region servers
// belong to multiple racks
Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
rackToServerCount.put("rack1", 10);
rackToServerCount.put("rack2", 10);
Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
assertTrue(primaryRSMap.size() == 60000);
Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
helper.placeSecondaryAndTertiaryRS(primaryRSMap);
assertTrue(secondaryAndTertiaryMap.size() == 60000);
// for every region, the primary should be on one rack and the secondary/tertiary
// on another (we create a lot of regions just to increase probability of failure)
for (Map.Entry<HRegionInfo, ServerName[]> entry : secondaryAndTertiaryMap.entrySet()) {
ServerName[] allServersForRegion = entry.getValue();
String primaryRSRack = rackManager.getRack(primaryRSMap.get(entry.getKey()));
String secondaryRSRack = rackManager.getRack(allServersForRegion[0]);
String tertiaryRSRack = rackManager.getRack(allServersForRegion[1]);
assertTrue(!primaryRSRack.equals(secondaryRSRack));
assertTrue(secondaryRSRack.equals(tertiaryRSRack));
}
}
@Test
public void testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks() {
// Test the case where we have two racks but with less than two servers in each
// We will not have enough machines to select secondary/tertiary
Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
rackToServerCount.put("rack1", 1);
rackToServerCount.put("rack2", 1);
Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
assertTrue(primaryRSMap.size() == 6);
Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
helper.placeSecondaryAndTertiaryRS(primaryRSMap);
for (HRegionInfo region : regions) {
// not enough secondary/tertiary room to place the regions
assertTrue(secondaryAndTertiaryMap.get(region) == null);
}
}
@Test
public void testSecondaryAndTertiaryPlacementWithMoreThanOneServerInPrimaryRack() {
// Test the case where there is only one server in one rack and another rack
// has more servers. We try to choose secondary/tertiary on different
// racks than what the primary is on. But if the other rack doesn't have
// enough nodes to have both secondary/tertiary RSs, the tertiary is placed
// on the same rack as the primary server is on
Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
rackToServerCount.put("rack1", 2);
rackToServerCount.put("rack2", 1);
Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
assertTrue(primaryRSMap.size() == 6);
Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
helper.placeSecondaryAndTertiaryRS(primaryRSMap);
for (HRegionInfo region : regions) {
ServerName s = primaryRSMap.get(region);
ServerName secondaryRS = secondaryAndTertiaryMap.get(region)[0];
ServerName tertiaryRS = secondaryAndTertiaryMap.get(region)[1];
if (rackManager.getRack(s).equals("rack1")) {
assertTrue(rackManager.getRack(secondaryRS).equals("rack2") &&
rackManager.getRack(tertiaryRS).equals("rack1"));
}
if (rackManager.getRack(s).equals("rack2")) {
assertTrue(rackManager.getRack(secondaryRS).equals("rack1") &&
rackManager.getRack(tertiaryRS).equals("rack1"));
}
}
}
private Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
secondaryAndTertiaryRSPlacementHelper(
int regionCount, Map<String, Integer> rackToServerCount) {
Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<HRegionInfo, ServerName>();
List<ServerName> servers = getServersFromRack(rackToServerCount);
FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers,
new Configuration());
Map<ServerName, List<HRegionInfo>> assignmentMap =
new HashMap<ServerName, List<HRegionInfo>>();
helper.setRackManager(rackManager);
helper.initialize();
// create regions
List<HRegionInfo> regions = new ArrayList<HRegionInfo>(regionCount);
for (int i = 0; i < regionCount; i++) {
HRegionInfo region = new HRegionInfo(("foobar"+i).getBytes());
regions.add(region);
}
// place the regions
helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
return new Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
(primaryRSMap, helper, regions);
}
private void primaryRSPlacement(int regionCount, Map<HRegionInfo, ServerName> primaryRSMap) {
Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
rackToServerCount.put("rack1", 10);
rackToServerCount.put("rack2", 10);
rackToServerCount.put("rack3", 10);
List<ServerName> servers = getServersFromRack(rackToServerCount);
FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers,
new Configuration());
helper.setRackManager(rackManager);
helper.initialize();
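// With 30 servers spread across 3 racks there should be enough capacity for favored node placement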
assertTrue(helper.canPlaceFavoredNodes());
Map<ServerName, List<HRegionInfo>> assignmentMap =
new HashMap<ServerName, List<HRegionInfo>>();
if (primaryRSMap == null) primaryRSMap = new HashMap<HRegionInfo, ServerName>();
// create some regions
List<HRegionInfo> regions = new ArrayList<HRegionInfo>(regionCount);
for (int i = 0; i < regionCount; i++) {
HRegionInfo region = new HRegionInfo(("foobar" + i).getBytes());
regions.add(region);
}
// place those regions in primary RSs
helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
// we should have all the regions nicely spread across the racks
int regionsOnRack1 = 0;
int regionsOnRack2 = 0;
int regionsOnRack3 = 0;
for (Map.Entry<HRegionInfo, ServerName> entry : primaryRSMap.entrySet()) {
if (rackManager.getRack(entry.getValue()).equals("rack1")) {
regionsOnRack1++;
} else if (rackManager.getRack(entry.getValue()).equals("rack2")) {
regionsOnRack2++;
} else if (rackManager.getRack(entry.getValue()).equals("rack3")) {
regionsOnRack3++;
}
}
int numRegionsPerRack = (int)Math.ceil((double)regionCount/3); //since there are 3 racks
assertTrue(regionsOnRack1 == numRegionsPerRack && regionsOnRack2 == numRegionsPerRack
&& regionsOnRack3 == numRegionsPerRack);
int numRegionsPerServer = (int)Math.ceil((double)regionCount/30); //since there are 30 servers
for (Map.Entry<ServerName, List<HRegionInfo>> entry : assignmentMap.entrySet()) {
assertTrue(entry.getValue().size() == numRegionsPerServer);
}
}
}

View File

@ -103,7 +103,7 @@ public class TestRegionServerNoMaster {
// We reopen. We need a ZK node here, as a open is always triggered by a master.
ZKAssign.createNodeOffline(HTU.getZooKeeperWatcher(), hri, getRS().getServerName());
// first version is '0'
AdminProtos.OpenRegionRequest orr = RequestConverter.buildOpenRegionRequest(hri, 0);
AdminProtos.OpenRegionRequest orr = RequestConverter.buildOpenRegionRequest(hri, 0, null);
AdminProtos.OpenRegionResponse responseOpen = getRS().openRegion(null, orr);
Assert.assertTrue(responseOpen.getOpeningStateCount() == 1);
Assert.assertTrue(responseOpen.getOpeningState(0).
@ -220,7 +220,7 @@ public class TestRegionServerNoMaster {
// We're sending multiple requests in a row. The region server must handle this nicely.
for (int i = 0; i < 10; i++) {
AdminProtos.OpenRegionRequest orr = RequestConverter.buildOpenRegionRequest(hri, 0);
AdminProtos.OpenRegionRequest orr = RequestConverter.buildOpenRegionRequest(hri, 0, null);
AdminProtos.OpenRegionResponse responseOpen = getRS().openRegion(null, orr);
Assert.assertTrue(responseOpen.getOpeningStateCount() == 1);