HBASE-6038 Add getClusterStatus PB-based call to HMasterInterface

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1346748 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2012-06-06 06:26:22 +00:00
parent e76e8eb72e
commit 8ad8c97c1d
17 changed files with 5630 additions and 200 deletions
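In outline, the change swaps the old Writable-based getClusterStatus for a protobuf request/response pair on HMasterInterface. A minimal sketch of the resulting client-side call path, assuming `master` is an already-connected HMasterInterface proxy (this mirrors the HBaseAdmin change below):

// Sketch only: the new PB-based flow, using the names introduced in this commit.
// master.getClusterStatus throws ServiceException; handle or rethrow as HBaseAdmin does.
GetClusterStatusRequest req = RequestConverter.buildGetClusterStatusRequest();
GetClusterStatusResponse resp = master.getClusterStatus(null, req); // controller is unused, pass null
ClusterStatus status = ClusterStatus.convert(resp.getClusterStatus());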

ClusterStatus.java

@ -28,12 +28,20 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.HashSet;
import org.apache.hadoop.hbase.HServerLoad;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition;
import org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -43,6 +51,8 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.VersionMismatchException;
import org.apache.hadoop.io.VersionedWritable;
import com.google.protobuf.ByteString;
/**
* Status information on the HBase cluster.
* <p>
@ -78,7 +88,7 @@ public class ClusterStatus extends VersionedWritable {
private static final byte VERSION = 2;
private String hbaseVersion;
private Map<ServerName, HServerLoad> liveServers;
private Map<ServerName, ServerLoad> liveServers;
private Collection<ServerName> deadServers;
private ServerName master;
private Collection<ServerName> backupMasters;
@ -88,7 +98,9 @@ public class ClusterStatus extends VersionedWritable {
/**
* Constructor, for Writable
* @deprecated Used by Writables and Writables are going away.
*/
@Deprecated
public ClusterStatus() {
super();
}
@ -102,40 +114,7 @@ public class ClusterStatus extends VersionedWritable {
final String[] masterCoprocessors) {
this.hbaseVersion = hbaseVersion;
// TODO: This conversion of ServerLoad to HServerLoad is temporary,
// will be cleaned up in HBASE-5445. Using the ClusterStatus proto brings
// in a lot of other changes, so it makes sense to break this up.
Map<ServerName, HServerLoad> convertedLoad =
new HashMap<ServerName,HServerLoad>();
for (Map.Entry<ServerName,ServerLoad> entry : servers.entrySet()) {
ServerLoad sl = entry.getValue();
Map<byte[],RegionLoad> regionLoad = new HashMap<byte[],RegionLoad>();
for (HBaseProtos.RegionLoad rl : sl.getRegionLoadsList()) {
Set<String> regionCoprocessors = new HashSet<String>();
for (HBaseProtos.Coprocessor coprocessor
: rl.getCoprocessorsList()) {
regionCoprocessors.add(coprocessor.getName());
}
byte [] regionName = rl.getRegionSpecifier().getValue().toByteArray();
RegionLoad converted = new RegionLoad(regionName,
rl.getStores(),rl.getStorefiles(),rl.getStoreUncompressedSizeMB(),
rl.getStorefileSizeMB(),rl.getMemstoreSizeMB(),
rl.getStorefileIndexSizeMB(),rl.getRootIndexSizeKB(),
rl.getTotalStaticIndexSizeKB(),rl.getTotalStaticBloomSizeKB(),
rl.getReadRequestsCount(),rl.getWriteRequestsCount(),
rl.getTotalCompactingKVs(),rl.getCurrentCompactedKVs(),
regionCoprocessors);
regionLoad.put(regionName, converted);
}
HServerLoad hsl = new HServerLoad(sl.getTotalNumberOfRequests(),
sl.getRequestsPerSecond(),sl.getUsedHeapMB(),sl.getMaxHeapMB(),
regionLoad,new HashSet<String>(Arrays.asList(masterCoprocessors)));
convertedLoad.put(entry.getKey(), hsl);
}
this.liveServers = convertedLoad;
this.liveServers = servers;
this.deadServers = deadServers;
this.master = master;
this.backupMasters = backupMasters;
@ -178,8 +157,8 @@ public class ClusterStatus extends VersionedWritable {
*/
public int getRegionsCount() {
int count = 0;
for (Map.Entry<ServerName, HServerLoad> e: this.liveServers.entrySet()) {
count += e.getValue().getNumberOfRegions();
for (Map.Entry<ServerName, ServerLoad> e: this.liveServers.entrySet()) {
count += e.getValue().getRegionLoadsCount();
}
return count;
}
@ -189,8 +168,8 @@ public class ClusterStatus extends VersionedWritable {
*/
public int getRequestsCount() {
int count = 0;
for (Map.Entry<ServerName, HServerLoad> e: this.liveServers.entrySet()) {
count += e.getValue().getNumberOfRequests();
for (Map.Entry<ServerName, ServerLoad> e: this.liveServers.entrySet()) {
count += e.getValue().getTotalNumberOfRequests();
}
return count;
}
@ -281,7 +260,7 @@ public class ClusterStatus extends VersionedWritable {
* @return Server's load or null if not found.
*/
public HServerLoad getLoad(final ServerName sn) {
return this.liveServers.get(sn);
return HServerLoad.convert(this.liveServers.get(sn));
}
public Map<String, RegionState> getRegionsInTransition() {
@ -296,95 +275,74 @@ public class ClusterStatus extends VersionedWritable {
return masterCoprocessors;
}
//
// Writable
//
/**
* Convert a ClusterStatus to a protobuf ClusterStatus
*
* @return the protobuf ClusterStatus
*/
public ClusterStatusProtos.ClusterStatus convert() {
ClusterStatusProtos.ClusterStatus.Builder builder = ClusterStatusProtos.ClusterStatus.newBuilder();
builder.setHbaseVersion(HBaseVersionFileContent.newBuilder().setVersion(getHBaseVersion()));
public void write(DataOutput out) throws IOException {
super.write(out);
out.writeUTF(hbaseVersion);
out.writeInt(getServersSize());
for (Map.Entry<ServerName, HServerLoad> e: this.liveServers.entrySet()) {
Bytes.writeByteArray(out, e.getKey().getVersionedBytes());
e.getValue().write(out);
for (Map.Entry<ServerName, ServerLoad> entry : liveServers.entrySet()) {
LiveServerInfo.Builder lsi =
LiveServerInfo.newBuilder().setServer(ProtobufUtil.toServerName(entry.getKey()));
lsi.setServerLoad(entry.getValue().getServerLoadPB());
builder.addLiveServers(lsi.build());
}
out.writeInt(deadServers.size());
for (ServerName server: deadServers) {
Bytes.writeByteArray(out, server.getVersionedBytes());
for (ServerName deadServer : getDeadServerNames()) {
builder.addDeadServers(ProtobufUtil.toServerName(deadServer));
}
out.writeInt(this.intransition.size());
for (Map.Entry<String, RegionState> e: this.intransition.entrySet()) {
out.writeUTF(e.getKey());
e.getValue().write(out);
for (Map.Entry<String, RegionState> rit : getRegionsInTransition().entrySet()) {
ClusterStatusProtos.RegionState rs = rit.getValue().convert();
RegionSpecifier.Builder spec =
RegionSpecifier.newBuilder().setType(RegionSpecifierType.REGION_NAME);
spec.setValue(ByteString.copyFrom(Bytes.toBytes(rit.getKey())));
RegionInTransition pbRIT =
RegionInTransition.newBuilder().setSpec(spec.build()).setRegionState(rs).build();
builder.addRegionsInTransition(pbRIT);
}
out.writeUTF(clusterId);
out.writeInt(masterCoprocessors.length);
for(String masterCoprocessor: masterCoprocessors) {
out.writeUTF(masterCoprocessor);
builder.setClusterId(new ClusterId(getClusterId()).convert());
for (String coprocessor : getMasterCoprocessors()) {
builder.addMasterCoprocessors(HBaseProtos.Coprocessor.newBuilder().setName(coprocessor));
}
Bytes.writeByteArray(out, this.master.getVersionedBytes());
out.writeInt(this.backupMasters.size());
for (ServerName backupMaster: this.backupMasters) {
Bytes.writeByteArray(out, backupMaster.getVersionedBytes());
builder.setMaster(
ProtobufUtil.toServerName(getMaster()));
for (ServerName backup : getBackupMasters()) {
builder.addBackupMasters(ProtobufUtil.toServerName(backup));
}
return builder.build();
}
public void readFields(DataInput in) throws IOException {
int version = getVersion();
try {
super.readFields(in);
} catch (VersionMismatchException e) {
/*
* No API in VersionMismatchException to get the expected and found
* versions. We use the only tool available to us: toString(), whose
* output has a dependency on hadoop-common. Boo.
*/
int startIndex = e.toString().lastIndexOf('v') + 1;
version = Integer.parseInt(e.toString().substring(startIndex));
/**
* Convert a protobuf ClusterStatus to a ClusterStatus
*
* @param proto the protobuf ClusterStatus
* @return the converted ClusterStatus
*/
public static ClusterStatus convert(ClusterStatusProtos.ClusterStatus proto) {
Map<ServerName, ServerLoad> servers = new HashMap<ServerName, ServerLoad>();
for (LiveServerInfo lsi : proto.getLiveServersList()) {
servers.put(ProtobufUtil.toServerName(lsi.getServer()), new ServerLoad(lsi.getServerLoad()));
}
hbaseVersion = in.readUTF();
int count = in.readInt();
this.liveServers = new HashMap<ServerName, HServerLoad>(count);
for (int i = 0; i < count; i++) {
byte [] versionedBytes = Bytes.readByteArray(in);
HServerLoad hsl = new HServerLoad();
hsl.readFields(in);
this.liveServers.put(ServerName.parseVersionedServerName(versionedBytes), hsl);
Collection<ServerName> deadServers = new LinkedList<ServerName>();
for (HBaseProtos.ServerName sn : proto.getDeadServersList()) {
deadServers.add(ProtobufUtil.toServerName(sn));
}
count = in.readInt();
deadServers = new ArrayList<ServerName>(count);
for (int i = 0; i < count; i++) {
deadServers.add(ServerName.parseVersionedServerName(Bytes.readByteArray(in)));
Collection<ServerName> backupMasters = new LinkedList<ServerName>();
for (HBaseProtos.ServerName sn : proto.getBackupMastersList()) {
backupMasters.add(ProtobufUtil.toServerName(sn));
}
count = in.readInt();
this.intransition = new TreeMap<String, RegionState>();
for (int i = 0; i < count; i++) {
String key = in.readUTF();
RegionState regionState = new RegionState();
regionState.readFields(in);
this.intransition.put(key, regionState);
}
this.clusterId = in.readUTF();
int masterCoprocessorsLength = in.readInt();
masterCoprocessors = new String[masterCoprocessorsLength];
for(int i = 0; i < masterCoprocessorsLength; i++) {
masterCoprocessors[i] = in.readUTF();
}
// Only read extra fields for master and backup masters if
// version indicates that we should do so, else use defaults
if (version >= VERSION_MASTER_BACKUPMASTERS) {
this.master = ServerName.parseVersionedServerName(
Bytes.readByteArray(in));
count = in.readInt();
this.backupMasters = new ArrayList<ServerName>(count);
for (int i = 0; i < count; i++) {
this.backupMasters.add(ServerName.parseVersionedServerName(
Bytes.readByteArray(in)));
}
} else {
this.master = new ServerName(ServerName.UNKNOWN_SERVERNAME, -1,
ServerName.NON_STARTCODE);
this.backupMasters = new ArrayList<ServerName>(0);
final Map<String, RegionState> rit = new HashMap<String, RegionState>();
for (RegionInTransition region : proto.getRegionsInTransitionList()) {
String key = Bytes.toString(region.getSpec().getValue().toByteArray());
RegionState value = RegionState.convert(region.getRegionState());
rit.put(key,value);
}
final String[] masterCoprocessors = new String[proto.getMasterCoprocessorsCount()];
for (int i = 0; i < masterCoprocessors.length; i++) {
masterCoprocessors[i] = proto.getMasterCoprocessors(i).getName();
}
return new ClusterStatus(proto.getHbaseVersion().getVersion(),
ClusterId.convert(proto.getClusterId()).toString(),servers,deadServers,
ProtobufUtil.toServerName(proto.getMaster()),backupMasters,rit,masterCoprocessors);
}
}
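The two conversion methods above are intended as inverses, so a ClusterStatus should survive a round trip through its protobuf form. A small sketch, assuming `status` is a populated ClusterStatus:

// Sketch: round-trip through the protobuf representation.
ClusterStatusProtos.ClusterStatus pb = status.convert();
ClusterStatus restored = ClusterStatus.convert(pb);
// restored carries the same version, live/dead servers, regions in transition,
// cluster id and master coprocessors; for example:
assert restored.getHBaseVersion().equals(status.getHBaseVersion());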

HRegionInfo.java

@ -835,7 +835,6 @@ implements WritableComparable<HRegionInfo> {
/**
* Convert a HRegionInfo to a RegionInfo
*
* @param info the HRegionInfo to convert
* @return the converted RegionInfo
*/
RegionInfo convert() {

HServerLoad.java

@ -24,6 +24,8 @@ import java.io.DataOutput;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
@ -31,6 +33,7 @@ import java.util.TreeSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Strings;
import org.apache.hadoop.io.VersionedWritable;
@ -709,8 +712,44 @@ implements WritableComparable<HServerLoad> {
return count;
}
public static HServerLoad convert(ServerLoad sl) {
// TODO: This conversion of ServerLoad to HServerLoad is temporary,
// will be cleaned up in HBASE-5445. Using the ClusterStatus proto brings
// in a lot of other changes, so it makes sense to break this up.
Map<byte[],RegionLoad> regionLoad = new HashMap<byte[],RegionLoad>();
for (HBaseProtos.RegionLoad rl : sl.getRegionLoadsList()) {
Set<String> regionCoprocessors = new HashSet<String>();
for (HBaseProtos.Coprocessor coprocessor
: rl.getCoprocessorsList()) {
regionCoprocessors.add(coprocessor.getName());
}
byte [] regionName = rl.getRegionSpecifier().getValue().toByteArray();
RegionLoad converted = new RegionLoad(regionName,
rl.getStores(),rl.getStorefiles(),rl.getStoreUncompressedSizeMB(),
rl.getStorefileSizeMB(),rl.getMemstoreSizeMB(),
rl.getStorefileIndexSizeMB(),rl.getRootIndexSizeKB(),
rl.getTotalStaticIndexSizeKB(),rl.getTotalStaticBloomSizeKB(),
rl.getReadRequestsCount(),rl.getWriteRequestsCount(),
rl.getTotalCompactingKVs(),rl.getCurrentCompactedKVs(),
regionCoprocessors);
regionLoad.put(regionName, converted);
}
Set<String> coprocessors =
new HashSet<String>(Arrays.asList(ServerLoad.getRegionServerCoprocessors(sl)));
HServerLoad hsl = new HServerLoad(sl.getTotalNumberOfRequests(),
sl.getRequestsPerSecond(),sl.getUsedHeapMB(),sl.getMaxHeapMB(),
regionLoad,coprocessors);
return hsl;
}
// Writable
/**
* @deprecated Writables are going away.
*/
@Deprecated
public void readFields(DataInput in) throws IOException {
super.readFields(in);
int version = in.readByte();
@ -731,6 +770,10 @@ implements WritableComparable<HServerLoad> {
}
}
/**
* @deprecated Writables are going away.
*/
@Deprecated
public void write(DataOutput out) throws IOException {
super.write(out);
out.writeByte(VERSION);

HBaseAdmin.java

@ -88,6 +88,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescripto
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
@ -1710,8 +1711,9 @@ public class HBaseAdmin implements Abortable, Closeable {
public ClusterStatus getClusterStatus() throws IOException {
return execute(new MasterCallable<ClusterStatus>() {
@Override
public ClusterStatus call() {
return master.getClusterStatus();
public ClusterStatus call() throws ServiceException {
GetClusterStatusRequest req = RequestConverter.buildGetClusterStatusRequest();
return ClusterStatus.convert(master.getClusterStatus(null,req).getClusterStatus());
}
});
}

HMasterInterface.java

@ -65,6 +65,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.security.TokenInfo;
import org.apache.hadoop.hbase.security.KerberosInfo;
@ -240,9 +242,13 @@ public interface HMasterInterface extends VersionedProtocol {
/**
* Return cluster status.
* @param controller Unused (set to null).
* @param req GetClusterStatusRequest
* @return status object
* @throws ServiceException
*/
public ClusterStatus getClusterStatus();
public GetClusterStatusResponse getClusterStatus(RpcController controller, GetClusterStatusRequest req)
throws ServiceException;
/**
* Offline a region from the assignment manager's in-memory state. The

AssignmentManager.java

@ -73,6 +73,7 @@ import org.apache.hadoop.hbase.master.handler.OpenedRegionHandler;
import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler;
import org.apache.hadoop.hbase.master.handler.SplitRegionHandler;
import org.apache.hadoop.hbase.master.metrics.MasterMetrics;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
@ -3511,6 +3512,97 @@ public class AssignmentManager extends ZooKeeperListener {
+ ", server=" + serverName;
}
/**
* Convert a RegionState to a ClusterStatusProtos.RegionState
*
* @return the converted ClusterStatusProtos.RegionState
*/
public ClusterStatusProtos.RegionState convert() {
ClusterStatusProtos.RegionState.Builder regionState = ClusterStatusProtos.RegionState.newBuilder();
ClusterStatusProtos.RegionState.State rs;
switch (getState()) { // this RegionState's state, not the freshly-created builder's default
case OFFLINE:
rs = ClusterStatusProtos.RegionState.State.OFFLINE;
break;
case PENDING_OPEN:
rs = ClusterStatusProtos.RegionState.State.PENDING_OPEN;
break;
case OPENING:
rs = ClusterStatusProtos.RegionState.State.OPENING;
break;
case OPEN:
rs = ClusterStatusProtos.RegionState.State.OPEN;
break;
case PENDING_CLOSE:
rs = ClusterStatusProtos.RegionState.State.PENDING_CLOSE;
break;
case CLOSING:
rs = ClusterStatusProtos.RegionState.State.CLOSING;
break;
case CLOSED:
rs = ClusterStatusProtos.RegionState.State.CLOSED;
break;
case SPLITTING:
rs = ClusterStatusProtos.RegionState.State.SPLITTING;
break;
case SPLIT:
rs = ClusterStatusProtos.RegionState.State.SPLIT;
break;
default:
throw new IllegalStateException("");
}
regionState.setRegionInfo(HRegionInfo.convert(region));
regionState.setState(rs);
regionState.setStamp(getStamp());
return regionState.build();
}
/**
* Convert a protobuf ClusterStatusProtos.RegionState to a RegionState
*
* @param proto the protobuf RegionState
* @return the converted RegionState
*/
public static RegionState convert(ClusterStatusProtos.RegionState proto) {
RegionState.State state;
switch (proto.getState()) {
case OFFLINE:
state = State.OFFLINE;
break;
case PENDING_OPEN:
state = State.PENDING_OPEN;
break;
case OPENING:
state = State.OPENING;
break;
case OPEN:
state = State.OPEN;
break;
case PENDING_CLOSE:
state = State.PENDING_CLOSE;
break;
case CLOSING:
state = State.CLOSING;
break;
case CLOSED:
state = State.CLOSED;
break;
case SPLITTING:
state = State.SPLITTING;
break;
case SPLIT:
state = State.SPLIT;
break;
default:
throw new IllegalStateException("");
}
return new RegionState(HRegionInfo.convert(proto.getRegionInfo()),state,proto.getStamp(),null);
}
/**
* @deprecated Writables are going away
*/
@Deprecated
@Override
public void readFields(DataInput in) throws IOException {
region = new HRegionInfo();
@ -3519,6 +3611,10 @@ public class AssignmentManager extends ZooKeeperListener {
stamp.set(in.readLong());
}
/**
* @deprecated Writables are going away
*/
@Deprecated
@Override
public void write(DataOutput out) throws IOException {
region.write(out);
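
Since the two switch blocks in RegionState's convert methods mirror each other case for case, a RegionState round-trips through the protobuf form; note that the static convert passes null for the final constructor argument, so that field is not restored. A sketch, assuming `rs` is an open RegionState:

// Sketch: RegionState -> ClusterStatusProtos.RegionState -> RegionState.
ClusterStatusProtos.RegionState pb = rs.convert();
RegionState copy = RegionState.convert(pb);
// copy holds the same HRegionInfo, state and stamp as rs.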

HMaster.java

@ -168,6 +168,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse;
import com.google.protobuf.ServiceException;
/**
@ -1627,6 +1629,13 @@ Server {
}
}
@Override
public GetClusterStatusResponse getClusterStatus(RpcController controller, GetClusterStatusRequest req)
throws ServiceException {
GetClusterStatusResponse.Builder response = GetClusterStatusResponse.newBuilder();
response.setClusterStatus(getClusterStatus().convert());
return response.build();
}
/**
* @return cluster status
*/

ServerManager.java

@ -274,7 +274,7 @@ public class ServerManager {
/**
* @param serverName
* @return HServerLoad if serverName is known else null
* @return ServerLoad if serverName is known else null
*/
public ServerLoad getLoad(final ServerName serverName) {
return this.onlineServers.get(serverName);
@ -282,7 +282,7 @@ public class ServerManager {
/**
* @param address
* @return HServerLoad if serverName is known else null
* @return ServerLoad if serverName is known else null
* @deprecated Use {@link #getLoad(ServerName)}
*/
public ServerLoad getLoad(final HServerAddress address) {

RequestConverter.java

@ -97,6 +97,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionReq
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
@ -1101,4 +1102,13 @@ public final class RequestConverter {
public static SetBalancerRunningRequest buildLoadBalancerIsRequest(boolean on, boolean synchronous) {
return SetBalancerRunningRequest.newBuilder().setOn(on).setSynchronous(synchronous).build();
}
/**
* Creates a protocol buffer GetClusterStatusRequest
*
* @return A GetClusterStatusRequest
*/
public static GetClusterStatusRequest buildGetClusterStatusRequest() {
return GetClusterStatusRequest.newBuilder().build();
}
}

HBaseFsck.java

@ -235,7 +235,7 @@ public class HBaseFsck {
public void connect() throws IOException {
admin = new HBaseAdmin(conf);
meta = new HTable(conf, HConstants.META_TABLE_NAME);
status = admin.getMaster().getClusterStatus();
status = admin.getClusterStatus();
connection = admin.getConnection();
}

ClusterStatus.proto

@ -0,0 +1,66 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// This file contains protocol buffers that are used for ClusterStatus
option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "ClusterStatusProtos";
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
import "hbase.proto";
import "ClusterId.proto";
import "FS.proto";
message RegionState {
required RegionInfo regionInfo = 1;
required State state = 2;
optional uint64 stamp = 3;
enum State {
OFFLINE = 0; // region is in an offline state
PENDING_OPEN = 1; // sent rpc to server to open but has not begun
OPENING = 2; // server has begun to open but not yet done
OPEN = 3; // server opened region and updated meta
PENDING_CLOSE = 4; // sent rpc to server to close but has not begun
CLOSING = 5; // server has begun to close but not yet done
CLOSED = 6; // server closed region and updated meta
SPLITTING = 7; // server started split of a region
SPLIT = 8; // server completed split of a region
}
}
message RegionInTransition {
required RegionSpecifier spec = 1;
required RegionState regionState = 2;
}
message LiveServerInfo {
required ServerName server = 1;
required ServerLoad serverLoad = 2;
}
message ClusterStatus {
optional HBaseVersionFileContent hbaseVersion = 1;
repeated LiveServerInfo liveServers = 2;
repeated ServerName deadServers = 3;
repeated RegionInTransition regionsInTransition = 4;
optional ClusterId clusterId = 5;
repeated Coprocessor masterCoprocessors = 6;
optional ServerName master = 7;
repeated ServerName backupMasters = 8;
}
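
For orientation, these messages nest exactly the way ClusterStatus.convert() assembles them in Java; for instance, one RegionInTransition entry is built roughly like this (sketch, with a hypothetical region name):

// Sketch: hand-building a single RegionInTransition entry.
RegionSpecifier.Builder spec = RegionSpecifier.newBuilder()
    .setType(RegionSpecifierType.REGION_NAME)
    .setValue(ByteString.copyFrom(Bytes.toBytes("exampleRegionName"))); // hypothetical
RegionInTransition rit = RegionInTransition.newBuilder()
    .setSpec(spec.build())
    .setRegionState(state.convert()) // state: an AssignmentManager.RegionState
    .build();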

Master.proto

@ -25,6 +25,7 @@ option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
import "hbase.proto";
import "ClusterStatus.proto";
/* Column-level protobufs */
@ -177,6 +178,13 @@ message GetTableDescriptorsResponse {
repeated TableSchema tableSchema = 1;
}
message GetClusterStatusRequest {
}
message GetClusterStatusResponse {
required ClusterStatus clusterStatus = 1;
}
service MasterService {
/** Adds a column to the specified table. */
rpc addColumn(AddColumnRequest)
@ -270,4 +278,8 @@ service MasterService {
/** Get list of TableDescriptors for requested tables. */
rpc getTableDescriptors(GetTableDescriptorsRequest)
returns(GetTableDescriptorsResponse);
/** Return cluster status. */
rpc getClusterStatus(GetClusterStatusRequest)
returns(GetClusterStatusResponse);
}

TestHBaseFsck.java

@ -424,7 +424,7 @@ public class TestHBaseFsck {
*/
Map<ServerName, List<String>> getDeployedHRIs(
final HBaseAdmin admin) throws IOException {
ClusterStatus status = admin.getMaster().getClusterStatus();
ClusterStatus status = admin.getClusterStatus();
Collection<ServerName> regionServers = status.getServers();
Map<ServerName, List<String>> mm =
new HashMap<ServerName, List<String>>();