HBASE-1552 provide version running on cluster via getClusterStatus
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@786666 13f79535-47bb-0310-9956-ffa450edef68
commit e1bbd8cbc5
parent 3b01f1a469
@@ -196,8 +196,8 @@ Release 0.20.0 - Unreleased
    HBASE-1447  Take last version of the hbase-1249 design doc. and make
                documentation out of it
    HBASE-1206  Scanner spins when there are concurrent inserts to column family
-   HBASE-1536  Controlled crash of regionserver not hosting meta/root leaves master
-               in spinning state, regions not reassigned
+   HBASE-1536  Controlled crash of regionserver not hosting meta/root leaves
+               master in spinning state, regions not reassigned
    HBASE-1543  Unnecessary toString during scanning costs us some CPU
    HBASE-1544  Cleanup HTable (Jonathan Gray via Stack)
    HBASE-1488  After 1304 goes in, fix and reenable test of thrift, mr indexer,
@@ -367,6 +367,7 @@ Release 0.20.0 - Unreleased
                families results in bad scans
    HBASE-1540  Client delete unit test, define behavior
                (Jonathan Gray via Stack)
+   HBASE-1552  provide version running on cluster via getClusterStatus
 
  OPTIMIZATIONS
    HBASE-1412  Change values for delete column and column family in KeyValue
@@ -262,6 +262,7 @@ module HBase
     def status(format)
       status = @admin.getClusterStatus()
       if format != nil and format == "detailed"
+        puts("version %s" % [ status.getHBaseVersion() ])
         puts("%d live servers" % [ status.getServers() ])
         for server in status.getServerInfo()
           puts(" %s:%d %d" % \
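
For orientation, a minimal Java sketch (not part of this patch) that mirrors what the shell's "status 'detailed'" path above now prints, using the new ClusterStatus.getHBaseVersion() accessor. It assumes an HBase 0.20-era classpath, a running cluster, and a default client configuration; only the accessors shown in this diff are relied on.

    // Hedged sketch: print the cluster version and live servers, like the shell.
    import org.apache.hadoop.hbase.ClusterStatus;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HServerInfo;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    public class DetailedStatus {
      public static void main(String[] args) throws Exception {
        HBaseAdmin admin = new HBaseAdmin(new HBaseConfiguration());
        ClusterStatus status = admin.getClusterStatus();
        System.out.println("version " + status.getHBaseVersion());
        System.out.println(status.getServers() + " live servers");
        for (HServerInfo server : status.getServerInfo()) {
          // HServerInfo#toString is used here for brevity instead of the
          // shell's host:port and request-count formatting.
          System.out.println("    " + server);
        }
      }
    }
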
@@ -45,6 +45,8 @@ import org.apache.hadoop.io.VersionedWritable;
  */
 public class ClusterStatus extends VersionedWritable {
   private static final byte VERSION = 0;
+
+  private String hbaseVersion;
   private Collection<HServerInfo> liveServerInfo;
   private Collection<String> deadServers;
 
@@ -119,6 +121,20 @@ public class ClusterStatus extends VersionedWritable {
     return count;
   }
 
+  /**
+   * @return the HBase version string as reported by the HMaster
+   */
+  public String getHBaseVersion() {
+    return hbaseVersion;
+  }
+
+  /**
+   * @param version the HBase version string
+   */
+  public void setHBaseVersion(String version) {
+    hbaseVersion = version;
+  }
+
   /**
    * @see java.lang.Object#equals(java.lang.Object)
    */
@@ -130,6 +146,7 @@ public class ClusterStatus extends VersionedWritable {
       return false;
     }
     return (getVersion() == ((ClusterStatus)o).getVersion()) &&
+      getHBaseVersion().equals(((ClusterStatus)o).getHBaseVersion()) &&
       liveServerInfo.equals(((ClusterStatus)o).liveServerInfo) &&
       deadServers.equals(((ClusterStatus)o).deadServers);
   }
@@ -138,7 +155,8 @@ public class ClusterStatus extends VersionedWritable {
    * @see java.lang.Object#hashCode()
    */
   public int hashCode() {
-    return VERSION + liveServerInfo.hashCode() + deadServers.hashCode();
+    return VERSION + hbaseVersion.hashCode() + liveServerInfo.hashCode() +
+      deadServers.hashCode();
   }
 
   /** @return the object version number */
@@ -179,6 +197,7 @@ public class ClusterStatus extends VersionedWritable {
 
   public void write(DataOutput out) throws IOException {
     super.write(out);
+    out.writeUTF(hbaseVersion);
     out.writeInt(liveServerInfo.size());
     for (HServerInfo server: liveServerInfo) {
       server.write(out);
@@ -191,6 +210,7 @@ public class ClusterStatus extends VersionedWritable {
 
   public void readFields(DataInput in) throws IOException {
     super.readFields(in);
+    hbaseVersion = in.readUTF();
     int count = in.readInt();
     liveServerInfo = new ArrayList<HServerInfo>(count);
     for (int i = 0; i < count; i++) {
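
Since write() now emits hbaseVersion first (writeUTF) and readFields() reads it back first (readUTF), both sides of the RPC must agree on field order. A self-contained round-trip sketch under that assumption follows; the version string and the empty server collections are placeholders, not values from this patch.

    // Hedged sketch: serialize a ClusterStatus and read it back in memory.
    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.util.ArrayList;
    import java.util.HashSet;
    import org.apache.hadoop.hbase.ClusterStatus;
    import org.apache.hadoop.hbase.HServerInfo;

    public class ClusterStatusRoundTrip {
      public static void main(String[] args) throws Exception {
        ClusterStatus sent = new ClusterStatus();
        sent.setHBaseVersion("0.20.0-dev");                // placeholder version
        sent.setServerInfo(new ArrayList<HServerInfo>());  // no live servers
        sent.setDeadServers(new HashSet<String>());        // no dead servers

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        sent.write(new DataOutputStream(bytes));

        ClusterStatus received = new ClusterStatus();
        received.readFields(new DataInputStream(
            new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(received.getHBaseVersion());   // prints 0.20.0-dev
      }
    }
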
@@ -78,6 +78,7 @@ import org.apache.hadoop.hbase.util.InfoServer;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Sleeper;
 import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
@@ -985,6 +986,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
    */
  public ClusterStatus getClusterStatus() {
    ClusterStatus status = new ClusterStatus();
+    status.setHBaseVersion(VersionInfo.getVersion());
    status.setServerInfo(serverManager.serversToServerInfo.values());
    status.setDeadServers(serverManager.deadServers);
    return status;
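
One thing the new field enables, sketched here rather than taken from the patch: a client can compare its own build version (VersionInfo.getVersion(), the same call the HMaster uses above) against the version the cluster reports. Assumes a default client configuration and a reachable master; the mismatch message is illustrative only.

    // Hedged sketch: warn when client and cluster HBase versions differ.
    import org.apache.hadoop.hbase.ClusterStatus;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HBaseAdmin;
    import org.apache.hadoop.hbase.util.VersionInfo;

    public class VersionCheck {
      public static void main(String[] args) throws Exception {
        HBaseAdmin admin = new HBaseAdmin(new HBaseConfiguration());
        ClusterStatus status = admin.getClusterStatus();
        String clusterVersion = status.getHBaseVersion();
        String clientVersion = VersionInfo.getVersion();
        if (clientVersion.equals(clusterVersion)) {
          System.out.println("client and cluster both at " + clusterVersion);
        } else {
          System.err.println("client " + clientVersion +
              " does not match cluster " + clusterVersion);
        }
      }
    }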