HBASE-9892 Add info port to ServerName to support multiple instances on a node
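Each region server now publishes its web UI port in its ZooKeeper ephemeral node as a pb-serialized RegionServerInfo message. The master's RegionServerTracker reads that payload back, and HMaster#getRegionServerInfoPort falls back to hbase.regionserver.info.port only when no tracked value is available, so the master and table status pages no longer assume every region server uses the statically configured info port. LocalHBaseCluster additionally binds the info port to 0 so multiple instances on one host do not clash.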

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1551458 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2013-12-17 04:35:59 +00:00
parent fab098fa47
commit 7ea071db1f
14 changed files with 573 additions and 43 deletions

View File

@@ -13953,6 +13953,450 @@ public final class HBaseProtos {
// @@protoc_insertion_point(class_scope:NamespaceDescriptor)
}
public interface RegionServerInfoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// optional int32 infoPort = 1;
/**
* <code>optional int32 infoPort = 1;</code>
*/
boolean hasInfoPort();
/**
* <code>optional int32 infoPort = 1;</code>
*/
int getInfoPort();
}
/**
* Protobuf type {@code RegionServerInfo}
*
* <pre>
**
* Description of the region server info
* </pre>
*/
public static final class RegionServerInfo extends
com.google.protobuf.GeneratedMessage
implements RegionServerInfoOrBuilder {
// Use RegionServerInfo.newBuilder() to construct.
private RegionServerInfo(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private RegionServerInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final RegionServerInfo defaultInstance;
public static RegionServerInfo getDefaultInstance() {
return defaultInstance;
}
public RegionServerInfo getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private RegionServerInfo(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
infoPort_ = input.readInt32();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionServerInfo_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionServerInfo_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo.Builder.class);
}
public static com.google.protobuf.Parser<RegionServerInfo> PARSER =
new com.google.protobuf.AbstractParser<RegionServerInfo>() {
public RegionServerInfo parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new RegionServerInfo(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<RegionServerInfo> getParserForType() {
return PARSER;
}
private int bitField0_;
// optional int32 infoPort = 1;
public static final int INFOPORT_FIELD_NUMBER = 1;
private int infoPort_;
/**
* <code>optional int32 infoPort = 1;</code>
*/
public boolean hasInfoPort() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>optional int32 infoPort = 1;</code>
*/
public int getInfoPort() {
return infoPort_;
}
private void initFields() {
infoPort_ = 0;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeInt32(1, infoPort_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(1, infoPort_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo) obj;
boolean result = true;
result = result && (hasInfoPort() == other.hasInfoPort());
if (hasInfoPort()) {
result = result && (getInfoPort()
== other.getInfoPort());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasInfoPort()) {
hash = (37 * hash) + INFOPORT_FIELD_NUMBER;
hash = (53 * hash) + getInfoPort();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code RegionServerInfo}
*
* <pre>
**
* Description of the region server info
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionServerInfo_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionServerInfo_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
infoPort_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionServerInfo_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo build() {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.infoPort_ = infoPort_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo.getDefaultInstance()) return this;
if (other.hasInfoPort()) {
setInfoPort(other.getInfoPort());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// optional int32 infoPort = 1;
private int infoPort_;
/**
* <code>optional int32 infoPort = 1;</code>
*/
public boolean hasInfoPort() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>optional int32 infoPort = 1;</code>
*/
public int getInfoPort() {
return infoPort_;
}
/**
* <code>optional int32 infoPort = 1;</code>
*/
public Builder setInfoPort(int value) {
bitField0_ |= 0x00000001;
infoPort_ = value;
onChanged();
return this;
}
/**
* <code>optional int32 infoPort = 1;</code>
*/
public Builder clearInfoPort() {
bitField0_ = (bitField0_ & ~0x00000001);
infoPort_ = 0;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:RegionServerInfo)
}
static {
defaultInstance = new RegionServerInfo(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:RegionServerInfo)
}
private static com.google.protobuf.Descriptors.Descriptor
internal_static_TableName_descriptor;
private static
@@ -14048,6 +14492,11 @@ public final class HBaseProtos {
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_NamespaceDescriptor_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_RegionServerInfo_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_RegionServerInfo_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
@@ -14092,11 +14541,12 @@ public final class HBaseProtos {
"5\n\004UUID\022\026\n\016least_sig_bits\030\001 \002(\004\022\025\n\rmost_" +
"sig_bits\030\002 \002(\004\"K\n\023NamespaceDescriptor\022\014\n" +
"\004name\030\001 \002(\014\022&\n\rconfiguration\030\002 \003(\0132\017.Nam" +
"eStringPair*r\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\r" +
"LESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020" +
"\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005" +
"NO_OP\020\006B>\n*org.apache.hadoop.hbase.proto" +
"buf.generatedB\013HBaseProtosH\001\240\001\001"
"eStringPair\"$\n\020RegionServerInfo\022\020\n\010infoP" +
"ort\030\001 \001(\005*r\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\rLE" +
"SS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022" +
"\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO" +
"_OP\020\006B>\n*org.apache.hadoop.hbase.protobu",
"f.generatedB\013HBaseProtosH\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -14217,6 +14667,12 @@ public final class HBaseProtos {
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_NamespaceDescriptor_descriptor,
new java.lang.String[] { "Name", "Configuration", });
internal_static_RegionServerInfo_descriptor =
getDescriptor().getMessageTypes().get(19);
internal_static_RegionServerInfo_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegionServerInfo_descriptor,
new java.lang.String[] { "InfoPort", });
return null;
}
};

View File

@@ -185,3 +185,10 @@ message NamespaceDescriptor {
required bytes name = 1;
repeated NameStringPair configuration = 2;
}
/**
* Description of the region server info
*/
message RegionServerInfo {
optional int32 infoPort = 1;
}
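For orientation, here is a minimal round-trip of the new message, mirroring what HRegionServer.createMyEphemeralNode() writes and what RegionServerTracker parses further down in this commit. The ProtobufUtil calls and the 60030 default come from the patch itself; the wrapper class and main method are scaffolding of ours, not part of the change.

import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;

public class RegionServerInfoRoundTrip {
  public static void main(String[] args) throws Exception {
    // Region server side: pb-serialize the info port and prepend the PB magic,
    // as createMyEphemeralNode() does before writing the ephemeral znode.
    byte[] data = ProtobufUtil.prependPBMagic(
        RegionServerInfo.newBuilder().setInfoPort(60030).build().toByteArray());

    // Master side: recognize the magic, skip it, and parse the payload,
    // as RegionServerTracker does when a new RS znode appears.
    if (ProtobufUtil.isPBMagicPrefix(data)) {
      int magicLen = ProtobufUtil.lengthOfPBMagic();
      RegionServerInfo info = RegionServerInfo.newBuilder()
          .mergeFrom(data, magicLen, data.length - magicLen).build();
      System.out.println("info port: " + info.getInfoPort()); // prints 60030
    }
  }
}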

View File

@@ -401,7 +401,6 @@ AssignmentManager assignmentManager = master.getAssignmentManager();
ServerName [] deadServerNames = deadServers.toArray(new ServerName[deadServers.size()]);
Arrays.sort(deadServerNames);
for (ServerName deadServerName: deadServerNames) {
int infoPort = master.getConfiguration().getInt("hbase.regionserver.info.port", 60030);
</%java>
<tr>
<th></th>

View File

@@ -282,9 +282,7 @@ if (sl.getTotalCompactingKVs() > 0) {
ServerLoad serverLoad;
</%args>
<%java>
boolean useDefault = (serverLoad == null);
int defaultPort = master.getConfiguration().getInt("hbase.regionserver.info.port", 60030);
int infoPort = useDefault?defaultPort:serverLoad.getInfoServerPort();
int infoPort = master.getRegionServerInfoPort(serverName);
String url = "http://" + serverName.getHostname() + ":" + infoPort + "/";
</%java>

View File

@@ -89,7 +89,7 @@ org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
<div class="container">
<div class="row inner_header">
<div class="page-header">
<h1>RegionServer <small><% serverName.getHostname() %></small></h1>
<h1>RegionServer <small><% serverName %></small></h1>
</div>
</div>
<div class="row">

View File

@@ -143,6 +143,8 @@ public class LocalHBaseCluster {
// clash over default ports.
conf.set(HConstants.MASTER_PORT, "0");
conf.set(HConstants.REGIONSERVER_PORT, "0");
conf.set(HConstants.REGIONSERVER_INFO_PORT, "0");
this.masterClass = (Class<? extends HMaster>)
conf.getClass(HConstants.MASTER_IMPL, masterClass);
// Start the HMasters.

View File

@@ -113,6 +113,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
@@ -2158,6 +2159,15 @@ MasterServices, Server {
return masterActiveTime;
}
public int getRegionServerInfoPort(final ServerName sn) {
RegionServerInfo info = this.regionServerTracker.getRegionServerInfo(sn);
if (info == null || info.getInfoPort() == 0) {
return conf.getInt(HConstants.REGIONSERVER_INFO_PORT,
HConstants.DEFAULT_REGIONSERVER_INFOPORT);
}
return info.getInfoPort();
}
/**
* @return array of coprocessor SimpleNames.
*/
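The template changes further down consume this accessor when building region server links. A minimal sketch of that pattern, assuming a master and server name are in scope (the helper name is ours, not part of the patch):

// Hypothetical helper mirroring the JSP/Jamon template changes: build the
// web UI link from the master-tracked info port rather than the static config.
String rsInfoUrl(HMaster master, ServerName sn) {
  return "//" + sn.getHostname() + ":" + master.getRegionServerInfoPort(sn) + "/";
}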

View File

@@ -175,6 +175,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader;
@@ -346,8 +347,8 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
// debugging and unit tests.
protected volatile boolean abortRequested;
// Port we put up the webui on.
protected int webuiport = -1;
// region server static info like info port
private RegionServerInfo.Builder rsInfo;
ConcurrentMap<String, Integer> rowlocks = new ConcurrentHashMap<String, Integer>();
@@ -624,6 +625,11 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
this.distributedLogReplay = this.conf.getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY,
HConstants.DEFAULT_DISTRIBUTED_LOG_REPLAY_CONFIG);
this.rsInfo = RegionServerInfo.newBuilder();
// Put up the webui. Webui may come up on port other than configured if
// that port is occupied. Adjust serverInfo if this is the case.
this.rsInfo.setInfoPort(putUpWebUI());
}
/**
@@ -1266,9 +1272,10 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
}
}
private void createMyEphemeralNode() throws KeeperException {
ZKUtil.createEphemeralNodeAndWatch(this.zooKeeper, getMyEphemeralNodePath(),
HConstants.EMPTY_BYTE_ARRAY);
private void createMyEphemeralNode() throws KeeperException, IOException {
byte[] data = ProtobufUtil.prependPBMagic(rsInfo.build().toByteArray());
ZKUtil.createEphemeralNodeAndWatch(this.zooKeeper,
getMyEphemeralNodePath(), data);
}
private void deleteMyEphemeralNode() throws KeeperException {
@@ -1607,10 +1614,6 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
this.leases.setName(n + ".leaseChecker");
this.leases.start();
// Put up the webui. Webui may come up on port other than configured if
// that port is occupied. Adjust serverInfo if this is the case.
this.webuiport = putUpWebUI();
if (this.replicationSourceHandler == this.replicationSinkHandler &&
this.replicationSourceHandler != null) {
this.replicationSourceHandler.startReplicationService();
@@ -1674,7 +1677,7 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
port++;
}
}
return port;
return this.infoServer.getPort();
}
/*
@@ -4021,7 +4024,7 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
final GetServerInfoRequest request) throws ServiceException {
ServerName serverName = getServerName();
requestCount.increment();
return ResponseConverter.buildGetServerInfoResponse(serverName, webuiport);
return ResponseConverter.buildGetServerInfoResponse(serverName, rsInfo.getInfoPort());
}
// End Admin methods

View File

@@ -51,6 +51,13 @@ public class RSDumpServlet extends StateDumpServlet {
assert hrsconf != null : "No RS conf in context";
response.setContentType("text/plain");
if (!hrs.isOnline()) {
response.getWriter().write("The RegionServer is initializing!");
response.getWriter().close();
return;
}
OutputStream os = response.getOutputStream();
PrintWriter out = new PrintWriter(os);

View File

@@ -41,6 +41,13 @@ public class RSStatusServlet extends HttpServlet {
assert hrs != null : "No RS in context!";
resp.setContentType("text/html");
if (!hrs.isOnline()) {
resp.getWriter().write("The RegionServer is initializing!");
resp.getWriter().close();
return;
}
RSStatusTmpl tmpl = new RSStatusTmpl();
if (req.getParameter("format") != null)
tmpl.setFormat(req.getParameter("format"));

View File

@@ -551,6 +551,7 @@ class FSHLog implements HLog, Syncable {
// perform the costly sync before we get the lock to roll writers.
try {
nextWriter.sync();
postSync();
} catch (IOException e) {
// optimization failed, no need to abort here.
LOG.warn("pre-sync failed", e);
@@ -1125,6 +1126,7 @@ class FSHLog implements HLog, Syncable {
for (Entry e : pendWrites) {
writer.append(e);
}
postAppend(pendWrites);
} catch(IOException e) {
LOG.error("Error while AsyncWriter write, request close of hlog ", e);
requestLogRoll();
@@ -1204,8 +1206,11 @@ class FSHLog implements HLog, Syncable {
long now = EnvironmentEdgeManager.currentTimeMillis();
try {
this.isSyncing = true;
if (writer != null) writer.sync();
if (writer != null) {
writer.sync();
}
this.isSyncing = false;
postSync();
} catch (IOException e) {
LOG.fatal("Error while AsyncSyncer sync, request close of hlog ", e);
requestLogRoll();

View File

@@ -21,7 +21,9 @@ package org.apache.hadoop.hbase.zookeeper;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.NavigableMap;
import java.util.NavigableSet;
import java.util.TreeMap;
import java.util.TreeSet;
import org.apache.commons.logging.Log;
@@ -30,6 +32,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.zookeeper.KeeperException;
/**
@@ -45,7 +50,8 @@ import org.apache.zookeeper.KeeperException;
@InterfaceAudience.Private
public class RegionServerTracker extends ZooKeeperListener {
private static final Log LOG = LogFactory.getLog(RegionServerTracker.class);
private NavigableSet<ServerName> regionServers = new TreeSet<ServerName>();
private NavigableMap<ServerName, RegionServerInfo> regionServers =
new TreeMap<ServerName, RegionServerInfo>();
private ServerManager serverManager;
private Abortable abortable;
@@ -76,7 +82,25 @@ public class RegionServerTracker extends ZooKeeperListener {
this.regionServers.clear();
for (String n: servers) {
ServerName sn = ServerName.parseServerName(ZKUtil.getNodeName(n));
this.regionServers.add(sn);
if (regionServers.get(sn) == null) {
RegionServerInfo.Builder rsInfoBuilder = RegionServerInfo.newBuilder();
try {
String nodePath = ZKUtil.joinZNode(watcher.rsZNode, n);
byte[] data = ZKUtil.getData(watcher, nodePath);
if (LOG.isDebugEnabled()) {
LOG.debug("RS node: " + nodePath + " data: " + Bytes.toString(data));
}
if (data != null && data.length > 0 && ProtobufUtil.isPBMagicPrefix(data)) {
int magicLen = ProtobufUtil.lengthOfPBMagic();
rsInfoBuilder.mergeFrom(data, magicLen, data.length - magicLen);
}
} catch (KeeperException e) {
LOG.warn("Get Rs info port from ephemeral node", e);
} catch (IOException e) {
LOG.warn("Illegal data from ephemeral node", e);
}
this.regionServers.put(sn, rsInfoBuilder.build());
}
}
}
}
@@ -119,13 +143,17 @@
}
}
public RegionServerInfo getRegionServerInfo(final ServerName sn) {
return regionServers.get(sn);
}
/**
* Gets the online servers.
* @return list of online servers
*/
public List<ServerName> getOnlineServers() {
synchronized (this.regionServers) {
return new ArrayList<ServerName>(this.regionServers);
return new ArrayList<ServerName>(this.regionServers.keySet());
}
}
}

View File

@@ -19,7 +19,7 @@
--%>
<%@ page contentType="text/html;charset=UTF-8"
import="static org.apache.commons.lang.StringEscapeUtils.escapeXml"
import="java.util.HashMap"
import="java.util.TreeMap"
import="java.util.Map"
import="org.apache.hadoop.conf.Configuration"
import="org.apache.hadoop.hbase.client.HTable"
@@ -50,9 +50,6 @@
if (showFragmentation) {
frags = FSUtils.getTableFragmentation(master);
}
// HARDCODED FOR NOW TODO: FIX GET FROM ZK
// This port might be wrong if RS actually ended up using something else.
int infoPort = conf.getInt("hbase.regionserver.info.port", 60030);
%>
<!--[if IE]>
<!DOCTYPE html>
@@ -200,11 +197,11 @@
HRegionInfo meta = HRegionInfo.FIRST_META_REGIONINFO;
ServerName metaLocation = master.getCatalogTracker().waitForMeta(1);
for (int i = 0; i < 1; i++) {
String url = "//" + metaLocation.getHostname() + ":" + infoPort + "/";
String url = "//" + metaLocation.getHostname() + ":" + master.getRegionServerInfoPort(metaLocation) + "/";
%>
<tr>
<td><%= escapeXml(meta.getRegionNameAsString()) %></td>
<td><a href="<%= url %>"><%= metaLocation.getHostname().toString() + ":" + infoPort %></a></td>
<td><a href="<%= url %>"><%= metaLocation.getHostname().toString() + ":" + master.getRegionServerInfoPort(metaLocation) %></a></td>
<td>-</td>
<td><%= escapeXml(Bytes.toString(meta.getStartKey())) %></td>
<td><%= escapeXml(Bytes.toString(meta.getEndKey())) %></td>
@@ -252,7 +249,7 @@
<% } %>
</table>
<%
Map<String, Integer> regDistribution = new HashMap<String, Integer>();
Map<ServerName, Integer> regDistribution = new TreeMap<ServerName, Integer>();
Map<HRegionInfo, ServerName> regions = table.getRegionLocations();
if(regions != null && regions.size() > 0) { %>
<%= tableHeader %>
@@ -271,22 +268,20 @@
if (map.containsKey(regionInfo.getRegionName())) {
req = map.get(regionInfo.getRegionName()).getRequestsCount();
}
// This port might be wrong if RS actually ended up using something else.
urlRegionServer =
addr.getHostname().toString() + ":" + infoPort;
Integer i = regDistribution.get(urlRegionServer);
Integer i = regDistribution.get(addr);
if (null == i) i = Integer.valueOf(0);
regDistribution.put(urlRegionServer, i+1);
regDistribution.put(addr, i + 1);
}
}
%>
<tr>
<td><%= escapeXml(Bytes.toStringBinary(regionInfo.getRegionName())) %></td>
<%
if (urlRegionServer != null) {
if (addr != null) {
String url = "//" + addr.getHostname() + ":" + master.getRegionServerInfoPort(addr) + "/";
%>
<td>
<a href="<%= "//" + urlRegionServer + "/" %>"><%= urlRegionServer %></a>
<a href="<%= url %>"><%= addr.getHostname().toString() + ":" + addr.getPort() %></a>
</td>
<%
} else {
@@ -304,10 +299,12 @@
<h2>Regions by Region Server</h2>
<table class="table table-striped"><tr><th>Region Server</th><th>Region Count</th></tr>
<%
for (Map.Entry<String, Integer> rdEntry : regDistribution.entrySet()) {
for (Map.Entry<ServerName, Integer> rdEntry : regDistribution.entrySet()) {
ServerName addr = rdEntry.getKey();
String url = "//" + addr.getHostname() + ":" + master.getRegionServerInfoPort(addr) + "/";
%>
<tr>
<td><a href="<%= "//" + rdEntry.getKey() + "/" %>"><%= rdEntry.getKey()%></a></td>
<td><a href="<%= url %>"><%= addr.getHostname().toString() + ":" + addr.getPort() %></a></td>
<td><%= rdEntry.getValue()%></td>
</tr>
<% } %>

View File

@@ -55,6 +55,7 @@ import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import com.yammer.metrics.core.Meter;
import com.yammer.metrics.core.Histogram;
import com.yammer.metrics.core.MetricsRegistry;
import com.yammer.metrics.reporting.ConsoleReporter;
@@ -69,8 +70,10 @@ public final class HLogPerformanceEvaluation extends Configured implements Tool
private final MetricsRegistry metrics = new MetricsRegistry();
private final Meter syncMeter =
metrics.newMeter(HLogPerformanceEvaluation.class, "syncMeter", "syncs", TimeUnit.MILLISECONDS);
private final Histogram syncHistogram =
metrics.newHistogram(HLogPerformanceEvaluation.class, "syncHistogram", "nanos-between-syncs", true);
private final Meter appendMeter =
metrics.newMeter(HLogPerformanceEvaluation.class, "append", "bytes", TimeUnit.MILLISECONDS);
metrics.newMeter(HLogPerformanceEvaluation.class, "appendMeter", "bytes", TimeUnit.MILLISECONDS);
private HBaseTestingUtility TEST_UTIL;
@@ -244,6 +247,8 @@ public final class HLogPerformanceEvaluation extends Configured implements Tool
final long whenToRoll = roll;
HLog hlog = new FSHLog(fs, rootRegionDir, "wals", getConf()) {
int appends = 0;
long lastSync = 0;
@Override
protected void doWrite(HRegionInfo info, HLogKey logKey, WALEdit logEdit,
HTableDescriptor htd)
@@ -260,6 +265,12 @@ public final class HLogPerformanceEvaluation extends Configured implements Tool
public void postSync() {
super.postSync();
syncMeter.mark();
long now = System.nanoTime();
if (lastSync > 0) {
long diff = now - lastSync;
syncHistogram.update(diff);
}
this.lastSync = now;
}
@Override
@@ -274,7 +285,7 @@ public final class HLogPerformanceEvaluation extends Configured implements Tool
HRegion region = null;
try {
region = openRegion(fs, rootRegionDir, htd, hlog);
ConsoleReporter.enable(this.metrics, 1, TimeUnit.SECONDS);
ConsoleReporter.enable(this.metrics, 60, TimeUnit.SECONDS);
long putTime =
runBenchmark(new HLogPutBenchmark(region, htd, numIterations, noSync, syncInterval),
numThreads);