HBASE-1454 HBaseAdmin.getClusterStatus

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@779443 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Andrew Kyle Purtell 2009-05-28 05:48:52 +00:00
parent 0f0e4579a9
commit 5e1b305116
10 changed files with 295 additions and 2 deletions

View File

@ -286,6 +286,7 @@ Release 0.20.0 - Unreleased
HBASE-1430 Read the logs in batches during log splitting to avoid OOME HBASE-1430 Read the logs in batches during log splitting to avoid OOME
HBASE-1017 Region balancing does not bring newly added node within HBASE-1017 Region balancing does not bring newly added node within
acceptable range (Evgeny Ryabitskiy via Stack) acceptable range (Evgeny Ryabitskiy via Stack)
HBASE-1454 HBaseAdmin.getClusterStatus
OPTIMIZATIONS OPTIMIZATIONS
HBASE-1412 Change values for delete column and column family in KeyValue HBASE-1412 Change values for delete column and column family in KeyValue

View File

@ -253,6 +253,31 @@ module HBase
@admin.shutdown() @admin.shutdown()
end end
# Print cluster status to stdout. When +format+ is "detailed" every
# live region server is walked and its per-region load dumped; any
# other value (including nil) prints the one-line summary.
def status(format)
  status = @admin.getClusterStatus()
  if format == "detailed"
    puts("%d live servers" % [ status.getServers() ])
    status.getServerInfo().each do |server|
      address = server.getServerAddress()
      puts(" %s:%d %d" %
        [ address.getHostname(), address.getPort(), server.getStartCode() ])
      puts(" %s" % [ server.getLoad().toString() ])
      server.getLoad().getRegionsLoad().each do |region|
        puts(" %s" % [ region.getNameAsString() ])
        puts(" %s" % [ region.toString() ])
      end
    end
    puts("%d dead servers" % [ status.getDeadServers() ])
    status.getDeadServerNames().each do |name|
      puts(" %s" % [ name ])
    end
  else
    puts("%d servers, %d dead, %.4f average load" %
      [ status.getServers(), status.getDeadServers(),
        status.getAverageLoad() ])
  end
end
def hcd(arg) def hcd(arg)
# Return a new HColumnDescriptor made of passed args # Return a new HColumnDescriptor made of passed args
# TODO: This is brittle code. # TODO: This is brittle code.

View File

@ -237,6 +237,13 @@ HBASE SHELL COMMANDS:
hbase> scan 't1', {COLUMNS => ['c1', 'c2'], LIMIT => 10, \\ hbase> scan 't1', {COLUMNS => ['c1', 'c2'], LIMIT => 10, \\
STARTROW => 'xyz'} STARTROW => 'xyz'}
status Show cluster status. Can be 'simple' or 'detailed'. The default is
'simple'. Examples:
hbase> status
hbase> status 'simple'
hbase> status 'detailed'
shutdown Shut down the cluster. shutdown Shut down the cluster.
truncate Disables, drops and recreates the specified table. truncate Disables, drops and recreates the specified table.
@ -334,7 +341,11 @@ end
def close_region(regionName, server = nil) def close_region(regionName, server = nil)
admin().close_region(regionName, server) admin().close_region(regionName, server)
end end
# Shell command: show cluster status. +format+ defaults to 'simple'
# (one-line summary); pass 'detailed' for per-server/per-region load.
def status(format = 'simple')
  admin().status(format)
end
# CRUD # CRUD
def get(table, row, args = {}) def get(table, row, args = {})

View File

@ -0,0 +1,207 @@
/**
* Copyright 2009 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import org.apache.hadoop.io.VersionedWritable;
/**
* Status information on the HBase cluster.
* <p>
* <tt>ClusterStatus</tt> provides clients with information such as:
* <ul>
* <li>The count and names of region servers in the cluster.</li>
* <li>The count and names of dead region servers in the cluster.</li>
* <li>The average cluster load.</li>
* <li>The number of regions deployed on the cluster.</li>
* <li>The number of requests since last report.</li>
* <li>Detailed region server loading and resource usage information,
* per server and per region.</li>
* </ul>
*/
public class ClusterStatus extends VersionedWritable {
private static final byte VERSION = 0;
private Collection<HServerInfo> liveServerInfo;
private Collection<String> deadServers;
/**
* Constructor, for Writable
*/
public ClusterStatus() {
}
/**
* @return the names of region servers in the cluster
*/
public Collection<String> getServerNames() {
ArrayList<String> names = new ArrayList<String>(liveServerInfo.size());
for (HServerInfo server: liveServerInfo) {
names.add(server.getName());
}
return names;
}
/**
* @return the names of region servers on the dead list
*/
public Collection<String> getDeadServerNames() {
return Collections.unmodifiableCollection(deadServers);
}
/**
* @return the number of region servers in the cluster
*/
public int getServers() {
return liveServerInfo.size();
}
/**
* @return the number of dead region servers in the cluster
*/
public int getDeadServers() {
return deadServers.size();
}
/**
* @return the average cluster load
*/
public double getAverageLoad() {
int load = 0;
for (HServerInfo server: liveServerInfo) {
load += server.getLoad().getLoad();
}
return (double)load / (double)liveServerInfo.size();
}
/**
* @return the number of regions deployed on the cluster
*/
public int getRegionsCount() {
int count = 0;
for (HServerInfo server: liveServerInfo) {
count += server.getLoad().getNumberOfRegions();
}
return count;
}
/**
* @return the number of requests since last report
*/
public int getRequestsCount() {
int count = 0;
for (HServerInfo server: liveServerInfo) {
count += server.getLoad().getNumberOfRequests();
}
return count;
}
/**
* @see java.lang.Object#equals(java.lang.Object)
*/
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof ClusterStatus)) {
return false;
}
return (getVersion() == ((ClusterStatus)o).getVersion()) &&
liveServerInfo.equals(((ClusterStatus)o).liveServerInfo) &&
deadServers.equals(((ClusterStatus)o).deadServers);
}
/**
* @see java.lang.Object#hashCode()
*/
public int hashCode() {
return VERSION + liveServerInfo.hashCode() + deadServers.hashCode();
}
/** @return the object version number */
public byte getVersion() {
return VERSION;
}
//
// Getters
//
/**
* Returns detailed region server information: A list of
* {@link HServerInfo}, containing server load and resource usage
* statistics as {@link HServerLoad}, containing per-region
* statistics as {@link HServerLoad.RegionLoad}.
* @return region server information
*/
public Collection<HServerInfo> getServerInfo() {
return Collections.unmodifiableCollection(liveServerInfo);
}
//
// Setters
//
public void setServerInfo(Collection<HServerInfo> serverInfo) {
this.liveServerInfo = serverInfo;
}
public void setDeadServers(Collection<String> deadServers) {
this.deadServers = deadServers;
}
//
// Writable
//
public void write(DataOutput out) throws IOException {
super.write(out);
out.writeInt(liveServerInfo.size());
for (HServerInfo server: liveServerInfo) {
server.write(out);
}
out.writeInt(deadServers.size());
for (String server: deadServers) {
out.writeUTF(server);
}
}
public void readFields(DataInput in) throws IOException {
super.readFields(in);
int count = in.readInt();
liveServerInfo = new ArrayList<HServerInfo>(count);
for (int i = 0; i < count; i++) {
HServerInfo info = new HServerInfo();
info.readFields(in);
liveServerInfo.add(info);
}
count = in.readInt();
deadServers = new ArrayList<String>(count);
for (int i = 0; i < count; i++) {
deadServers.add(in.readUTF());
}
}
}

View File

@ -23,7 +23,10 @@ import java.io.DataInput;
import java.io.DataOutput; import java.io.DataOutput;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Strings; import org.apache.hadoop.hbase.util.Strings;
import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.io.WritableComparable;
@ -94,6 +97,13 @@ public class HServerLoad implements WritableComparable<HServerLoad> {
return name; return name;
} }
/**
 * Returns the region name decoded to a String (the raw name is a
 * byte array).
 * @return the region name as a string
 */
public String getNameAsString() {
return Bytes.toString(name);
}
/** /**
* @return the number of stores * @return the number of stores
*/ */
@ -322,6 +332,13 @@ public class HServerLoad implements WritableComparable<HServerLoad> {
return numberOfRequests; return numberOfRequests;
} }
/**
 * Returns per-region load metrics for every region carried by this
 * server.
 * @return region load metrics (unmodifiable view over the backing
 * collection)
 */
public Collection<RegionLoad> getRegionsLoad() {
return Collections.unmodifiableCollection(regionLoad);
}
/** /**
* @return Count of storefiles on this regionserver * @return Count of storefiles on this regionserver
*/ */

View File

@ -24,6 +24,7 @@ import java.util.Map;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
@ -791,6 +792,17 @@ public class HBaseAdmin {
} }
} }
/**
 * Fetches cluster-wide status (live/dead servers, load) from the
 * master via RPC.
 * @return cluster status
 * @throws MasterNotRunningException if the master proxy is gone
 * (cluster has been shut down)
 * @throws IOException if the RPC to the master fails
 */
public ClusterStatus getClusterStatus() throws IOException {
if (this.master == null) {
throw new MasterNotRunningException("master has been shut down");
}
return this.master.getClusterStatus();
}
private HRegionLocation getFirstMetaServerForTable(final byte [] tableName) private HRegionLocation getFirstMetaServerForTable(final byte [] tableName)
throws IOException { throws IOException {
return connection.locateRegion(HConstants.META_TABLE_NAME, return connection.locateRegion(HConstants.META_TABLE_NAME,

View File

@ -30,6 +30,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured; import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HMsg; import org.apache.hadoop.hbase.HMsg;
import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionInfo;
@ -133,6 +134,7 @@ public class HbaseObjectWritable implements Writable, Configurable {
e.printStackTrace(); e.printStackTrace();
} }
addToMap(BatchUpdate[].class, code++); addToMap(BatchUpdate[].class, code++);
addToMap(ClusterStatus.class, code++);
} }
private Class<?> declaredClass; private Class<?> declaredClass;

View File

@ -70,7 +70,8 @@ public interface HBaseRPCProtocolVersion extends VersionedProtocol {
* location instead.</li> * location instead.</li>
* <li>Version 17: Added incrementColumnValue.</li> * <li>Version 17: Added incrementColumnValue.</li>
* <li>Version 18: HBASE-1302.</li> * <li>Version 18: HBASE-1302.</li>
* <li>Version 19: Added getClusterStatus().</li>
* </ul> * </ul>
*/ */
public static final long versionID = 18L; public static final long versionID = 19L;
} }

View File

@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.ipc;
import java.io.IOException; import java.io.IOException;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.Writable;
@ -116,4 +117,9 @@ public interface HMasterInterface extends HBaseRPCProtocolVersion {
* @throws IOException * @throws IOException
*/ */
public void shutdown() throws IOException; public void shutdown() throws IOException;
/**
 * Return cluster status.
 * @return status of the cluster: live/dead region servers and
 * their load
 */
public ClusterStatus getClusterStatus();
} }

View File

@ -40,6 +40,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
@ -969,6 +970,16 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
} }
} }
/**
 * Builds a ClusterStatus from the master's current view of the
 * cluster.
 * @return cluster status
 */
public ClusterStatus getClusterStatus() {
ClusterStatus status = new ClusterStatus();
// NOTE(review): these hand ClusterStatus live references to the
// ServerManager's collections, not snapshots — presumably the status
// is serialized before the collections change; verify against callers.
status.setServerInfo(serverManager.serversToServerInfo.values());
status.setDeadServers(serverManager.deadServers);
return status;
}
/** /**
* @return Server metrics * @return Server metrics
*/ */