HBASE-5933 Hide HBaseProtos.ServerLoad and HBaseProtos.RegionLoad from ClusterStatus (Gregory)

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1353740 13f79535-47bb-0310-9956-ffa450edef68
Zhihong Yu 2012-06-25 20:58:05 +00:00
parent f690e086f6
commit 28c9c4dd6e
18 changed files with 343 additions and 985 deletions
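Before the per-file diffs, a note on the shape of the change: ClusterStatus now hands out the ServerLoad wrapper instead of HServerLoad, so callers never touch the generated HBaseProtos.ServerLoad and HBaseProtos.RegionLoad types directly. A minimal sketch of the resulting client-side pattern (illustration only; "admin" is a hypothetical HBaseAdmin, and only methods visible in this patch are used):

ClusterStatus status = admin.getClusterStatus();
for (ServerName sn : status.getServers()) {
  ServerLoad sl = status.getLoad(sn);      // returned HServerLoad before this patch
  if (sl == null) continue;
  int regions = sl.getNumberOfRegions();   // replaces getRegionLoadsCount()
  double rps = sl.getRequestsPerSecond();  // now a rate derived from the report window
  for (Map.Entry<byte[], RegionLoad> e : sl.getRegionsLoad().entrySet()) {
    long perRegionRequests = e.getValue().getRequestsCount();
  }
}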

View File

@@ -93,15 +93,15 @@ Arrays.sort(serverNames);
for (ServerName serverName: serverNames) {
ServerLoad sl = master.getServerManager().getLoad(serverName);
int requestsPerSecond = 0,
numRegionsOnline = 0;
double requestsPerSecond = 0.0;
int numRegionsOnline = 0;
if (sl != null) {
requestsPerSecond = sl.getRequestsPerSecond();
numRegionsOnline = sl.getRegionLoadsCount();
totalRegions += sl.getRegionLoadsCount();
numRegionsOnline = sl.getNumberOfRegions();
totalRegions += sl.getNumberOfRegions();
// Is this correct? Adding a rate to a measure.
totalRequests += sl.getRequestsPerSecond();
totalRequests += sl.getNumberOfRequests();
}
long startcode = serverName.getStartcode();
</%java>
@@ -144,7 +144,7 @@ for (ServerName serverName: serverNames) {
<td><& serverNameLink; serverName=serverName; &></td>
<td><% sl.getUsedHeapMB() %>MB</td>
<td><% sl.getMaxHeapMB() %>MB</td>
<td><% sl.getMemstoreSizeMB() %>MB</td>
<td><% sl.getMemstoreSizeInMB() %>MB</td>
</tr>
<%java>
@@ -219,7 +219,7 @@ if (sl != null) {
<td><% sl.getStores() %></td>
<td><% sl.getStorefiles() %></td>
<td><% sl.getStoreUncompressedSizeMB() %>MB</td>
<td><% sl.getStorefileSizeMB() %>MB</td>
<td><% sl.getStorefileSizeInMB() %>MB</td>
<td><% sl.getTotalStaticIndexSizeKB() %>KB</td>
<td><% sl.getTotalStaticBloomSizeKB() %>KB</td>
</tr>
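The template above switches to the renamed aggregate accessors; a short sketch of the same calls against a ServerLoad instance sl (method names taken from this patch):

int memstoreMB = sl.getMemstoreSizeInMB();    // was getMemstoreSizeMB()
int storefileMB = sl.getStorefileSizeInMB();  // was getStorefileSizeMB()
double rps = sl.getRequestsPerSecond();       // was an int counter, now a double rate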

View File

@@ -33,7 +33,6 @@ import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.HashSet;
import org.apache.hadoop.hbase.HServerLoad;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo;
@@ -45,7 +44,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Re
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.HServerLoad.RegionLoad;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.master.AssignmentManager.RegionState;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.VersionMismatchException;
@@ -161,7 +160,7 @@ public class ClusterStatus extends VersionedWritable {
public int getRegionsCount() {
int count = 0;
for (Map.Entry<ServerName, ServerLoad> e: this.liveServers.entrySet()) {
count += e.getValue().getRegionLoadsCount();
count += e.getValue().getNumberOfRegions();
}
return count;
}
@@ -262,8 +261,8 @@
* @param sn
* @return Server's load or null if not found.
*/
public HServerLoad getLoad(final ServerName sn) {
return HServerLoad.convert(this.liveServers.get(sn));
public ServerLoad getLoad(final ServerName sn) {
return this.liveServers.get(sn);
}
public Map<String, RegionState> getRegionsInTransition() {

View File

@@ -1,798 +0,0 @@
/**
* Copyright 2007 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Strings;
import org.apache.hadoop.io.VersionedWritable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableUtils;
/**
* This class is used for exporting the current state of load on a RegionServer.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HServerLoad extends VersionedWritable
implements WritableComparable<HServerLoad> {
private static final byte VERSION = 2;
// Empty load instance.
public static final HServerLoad EMPTY_HSERVERLOAD = new HServerLoad();
/** Number of requests per second since last report.
*/
// TODO: Instead build this up out of region counters.
private int numberOfRequests = 0;
/** Total Number of requests from the start of the region server.
*/
private int totalNumberOfRequests = 0;
/** the amount of used heap, in MB */
private int usedHeapMB = 0;
/** the maximum allowable size of the heap, in MB */
private int maxHeapMB = 0;
// Regionserver-level coprocessors, e.g., WALObserver implementations.
// Region-level coprocessors, on the other hand, are stored inside RegionLoad
// objects.
private Set<String> coprocessors =
new TreeSet<String>();
/**
* HBASE-4070: Improve region server metrics to report loaded coprocessors.
*
* @return Returns the set of all coprocessors on this
* regionserver, where this set is the union of the
* regionserver-level coprocessors on one hand, and all of the region-level
* coprocessors, on the other.
*
* We must iterate through all regions loaded on this regionserver to
* obtain all of the region-level coprocessors.
*/
public String[] getCoprocessors() {
TreeSet<String> returnValue = new TreeSet<String>(coprocessors);
for (Map.Entry<byte[], RegionLoad> rls: getRegionsLoad().entrySet()) {
for (String coprocessor: rls.getValue().getCoprocessors()) {
returnValue.add(coprocessor);
}
}
return returnValue.toArray(new String[0]);
}
/** per-region load metrics */
private Map<byte[], RegionLoad> regionLoad =
new TreeMap<byte[], RegionLoad>(Bytes.BYTES_COMPARATOR);
/** @return the object version number */
public byte getVersion() {
return VERSION;
}
/**
* Encapsulates per-region loading metrics.
*/
public static class RegionLoad extends VersionedWritable {
private static final byte VERSION = 2;
/** @return the object version number */
public byte getVersion() {
return VERSION;
}
/** the region name */
private byte[] name;
/** the number of stores for the region */
private int stores;
/** the number of storefiles for the region */
private int storefiles;
/** the total size of the store files for the region, uncompressed, in MB */
private int storeUncompressedSizeMB;
/** the current total size of the store files for the region, in MB */
private int storefileSizeMB;
/** the current size of the memstore for the region, in MB */
private int memstoreSizeMB;
/**
* The current total size of root-level store file indexes for the region,
* in MB. The same as {@link #rootIndexSizeKB} but in MB.
*/
private int storefileIndexSizeMB;
/** the current total read requests made to region */
private long readRequestsCount;
/** the current total write requests made to region */
private long writeRequestsCount;
/** the total compacting key values in currently running compaction */
private long totalCompactingKVs;
/** the completed count of key values in currently running compaction */
private long currentCompactedKVs;
/** The current total size of root-level indexes for the region, in KB. */
private int rootIndexSizeKB;
/** The total size of all index blocks, not just the root level, in KB. */
private int totalStaticIndexSizeKB;
/**
* The total size of all Bloom filter blocks, not just loaded into the
* block cache, in KB.
*/
private int totalStaticBloomSizeKB;
// Region-level coprocessors.
Set<String> coprocessors =
new TreeSet<String>();
/**
* Constructor, for Writable
*/
public RegionLoad() {
super();
}
/**
* @param name
* @param stores
* @param storefiles
* @param storeUncompressedSizeMB
* @param storefileSizeMB
* @param memstoreSizeMB
* @param storefileIndexSizeMB
* @param readRequestsCount
* @param writeRequestsCount
* @param totalCompactingKVs
* @param currentCompactedKVs
* @param coprocessors
*/
public RegionLoad(final byte[] name, final int stores,
final int storefiles, final int storeUncompressedSizeMB,
final int storefileSizeMB,
final int memstoreSizeMB, final int storefileIndexSizeMB,
final int rootIndexSizeKB, final int totalStaticIndexSizeKB,
final int totalStaticBloomSizeKB,
final long readRequestsCount, final long writeRequestsCount,
final long totalCompactingKVs, final long currentCompactedKVs,
final Set<String> coprocessors) {
this.name = name;
this.stores = stores;
this.storefiles = storefiles;
this.storeUncompressedSizeMB = storeUncompressedSizeMB;
this.storefileSizeMB = storefileSizeMB;
this.memstoreSizeMB = memstoreSizeMB;
this.storefileIndexSizeMB = storefileIndexSizeMB;
this.rootIndexSizeKB = rootIndexSizeKB;
this.totalStaticIndexSizeKB = totalStaticIndexSizeKB;
this.totalStaticBloomSizeKB = totalStaticBloomSizeKB;
this.readRequestsCount = readRequestsCount;
this.writeRequestsCount = writeRequestsCount;
this.totalCompactingKVs = totalCompactingKVs;
this.currentCompactedKVs = currentCompactedKVs;
this.coprocessors = coprocessors;
}
// Getters
private String[] getCoprocessors() {
return coprocessors.toArray(new String[0]);
}
/**
* @return the region name
*/
public byte[] getName() {
return name;
}
/**
* @return the region name as a string
*/
public String getNameAsString() {
return Bytes.toString(name);
}
/**
* @return the number of stores
*/
public int getStores() {
return stores;
}
/**
* @return the number of storefiles
*/
public int getStorefiles() {
return storefiles;
}
/**
* @return the total size of the storefiles, in MB
*/
public int getStorefileSizeMB() {
return storefileSizeMB;
}
/**
* @return the memstore size, in MB
*/
public int getMemStoreSizeMB() {
return memstoreSizeMB;
}
/**
* @return the approximate size of storefile indexes on the heap, in MB
*/
public int getStorefileIndexSizeMB() {
return storefileIndexSizeMB;
}
/**
* @return the number of requests made to region
*/
public long getRequestsCount() {
return readRequestsCount + writeRequestsCount;
}
/**
* @return the number of read requests made to region
*/
public long getReadRequestsCount() {
return readRequestsCount;
}
/**
* @return the number of write requests made to region
*/
public long getWriteRequestsCount() {
return writeRequestsCount;
}
/**
* @return The current total size of root-level indexes for the region, in KB.
*/
public int getRootIndexSizeKB() {
return rootIndexSizeKB;
}
/**
* @return The total size of all index blocks, not just the root level, in KB.
*/
public int getTotalStaticIndexSizeKB() {
return totalStaticIndexSizeKB;
}
/**
* @return The total size of all Bloom filter blocks, not just loaded into the
* block cache, in KB.
*/
public int getTotalStaticBloomSizeKB() {
return totalStaticBloomSizeKB;
}
/**
* @return the total number of kvs in current compaction
*/
public long getTotalCompactingKVs() {
return totalCompactingKVs;
}
/**
* @return the number of already compacted kvs in current compaction
*/
public long getCurrentCompactedKVs() {
return currentCompactedKVs;
}
// Setters
/**
* @param name the region name
*/
public void setName(byte[] name) {
this.name = name;
}
/**
* @param stores the number of stores
*/
public void setStores(int stores) {
this.stores = stores;
}
/**
* @param storefiles the number of storefiles
*/
public void setStorefiles(int storefiles) {
this.storefiles = storefiles;
}
/**
* @param memstoreSizeMB the memstore size, in MB
*/
public void setMemStoreSizeMB(int memstoreSizeMB) {
this.memstoreSizeMB = memstoreSizeMB;
}
/**
* @param storefileIndexSizeMB the approximate size of storefile indexes
* on the heap, in MB
*/
public void setStorefileIndexSizeMB(int storefileIndexSizeMB) {
this.storefileIndexSizeMB = storefileIndexSizeMB;
}
/**
* @param requestsCount the number of read requests to region
*/
public void setReadRequestsCount(int requestsCount) {
this.readRequestsCount = requestsCount;
}
/**
* @param requestsCount the number of write requests to region
*/
public void setWriteRequestsCount(int requestsCount) {
this.writeRequestsCount = requestsCount;
}
/**
* @param totalCompactingKVs the number of kvs total in current compaction
*/
public void setTotalCompactingKVs(long totalCompactingKVs) {
this.totalCompactingKVs = totalCompactingKVs;
}
/**
* @param currentCompactedKVs the number of kvs already compacted in
* current compaction
*/
public void setCurrentCompactedKVs(long currentCompactedKVs) {
this.currentCompactedKVs = currentCompactedKVs;
}
/**
* HBASE-5256 and HBASE-5283 introduced incompatible serialization changes
* This method reads the fields in 0.92 serialization format, ex-version field
* @param in
* @throws IOException
*/
private void readFields92(DataInput in) throws IOException {
// in 0.92, the version was actually written twice, consume the second copy
in.readByte(); // version
int namelen = in.readInt();
this.name = new byte[namelen];
in.readFully(this.name);
this.stores = in.readInt();
this.storefiles = in.readInt();
this.storeUncompressedSizeMB = in.readInt();
this.storefileSizeMB = in.readInt();
this.memstoreSizeMB = in.readInt();
this.storefileIndexSizeMB = in.readInt();
this.readRequestsCount = in.readInt();
this.writeRequestsCount = in.readInt();
this.rootIndexSizeKB = in.readInt();
this.totalStaticIndexSizeKB = in.readInt();
this.totalStaticBloomSizeKB = in.readInt();
this.totalCompactingKVs = in.readLong();
this.currentCompactedKVs = in.readLong();
int coprocessorsSize = in.readInt();
coprocessors = new TreeSet<String>();
for (int i = 0; i < coprocessorsSize; i++) {
coprocessors.add(in.readUTF());
}
}
// Writable
public void readFields(DataInput in) throws IOException {
int version = in.readByte();
if (version > VERSION) throw new IOException("Version mismatch; " + version);
if (version == 1) {
readFields92(in);
return;
}
int namelen = WritableUtils.readVInt(in);
this.name = new byte[namelen];
in.readFully(this.name);
this.stores = WritableUtils.readVInt(in);
this.storefiles = WritableUtils.readVInt(in);
this.storeUncompressedSizeMB = WritableUtils.readVInt(in);
this.storefileSizeMB = WritableUtils.readVInt(in);
this.memstoreSizeMB = WritableUtils.readVInt(in);
this.storefileIndexSizeMB = WritableUtils.readVInt(in);
this.readRequestsCount = WritableUtils.readVLong(in);
this.writeRequestsCount = WritableUtils.readVLong(in);
this.rootIndexSizeKB = WritableUtils.readVInt(in);
this.totalStaticIndexSizeKB = WritableUtils.readVInt(in);
this.totalStaticBloomSizeKB = WritableUtils.readVInt(in);
this.totalCompactingKVs = WritableUtils.readVLong(in);
this.currentCompactedKVs = WritableUtils.readVLong(in);
int coprocessorsSize = WritableUtils.readVInt(in);
coprocessors = new TreeSet<String>();
for (int i = 0; i < coprocessorsSize; i++) {
coprocessors.add(in.readUTF());
}
}
public void write(DataOutput out) throws IOException {
super.write(out);
WritableUtils.writeVInt(out, name.length);
out.write(name);
WritableUtils.writeVInt(out, stores);
WritableUtils.writeVInt(out, storefiles);
WritableUtils.writeVInt(out, storeUncompressedSizeMB);
WritableUtils.writeVInt(out, storefileSizeMB);
WritableUtils.writeVInt(out, memstoreSizeMB);
WritableUtils.writeVInt(out, storefileIndexSizeMB);
WritableUtils.writeVLong(out, readRequestsCount);
WritableUtils.writeVLong(out, writeRequestsCount);
WritableUtils.writeVInt(out, rootIndexSizeKB);
WritableUtils.writeVInt(out, totalStaticIndexSizeKB);
WritableUtils.writeVInt(out, totalStaticBloomSizeKB);
WritableUtils.writeVLong(out, totalCompactingKVs);
WritableUtils.writeVLong(out, currentCompactedKVs);
WritableUtils.writeVInt(out, coprocessors.size());
for (String coprocessor: coprocessors) {
out.writeUTF(coprocessor);
}
}
/**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "numberOfStores",
Integer.valueOf(this.stores));
sb = Strings.appendKeyValue(sb, "numberOfStorefiles",
Integer.valueOf(this.storefiles));
sb = Strings.appendKeyValue(sb, "storefileUncompressedSizeMB",
Integer.valueOf(this.storeUncompressedSizeMB));
sb = Strings.appendKeyValue(sb, "storefileSizeMB",
Integer.valueOf(this.storefileSizeMB));
if (this.storeUncompressedSizeMB != 0) {
sb = Strings.appendKeyValue(sb, "compressionRatio",
String.format("%.4f", (float)this.storefileSizeMB/
(float)this.storeUncompressedSizeMB));
}
sb = Strings.appendKeyValue(sb, "memstoreSizeMB",
Integer.valueOf(this.memstoreSizeMB));
sb = Strings.appendKeyValue(sb, "storefileIndexSizeMB",
Integer.valueOf(this.storefileIndexSizeMB));
sb = Strings.appendKeyValue(sb, "readRequestsCount",
Long.valueOf(this.readRequestsCount));
sb = Strings.appendKeyValue(sb, "writeRequestsCount",
Long.valueOf(this.writeRequestsCount));
sb = Strings.appendKeyValue(sb, "rootIndexSizeKB",
Integer.valueOf(this.rootIndexSizeKB));
sb = Strings.appendKeyValue(sb, "totalStaticIndexSizeKB",
Integer.valueOf(this.totalStaticIndexSizeKB));
sb = Strings.appendKeyValue(sb, "totalStaticBloomSizeKB",
Integer.valueOf(this.totalStaticBloomSizeKB));
sb = Strings.appendKeyValue(sb, "totalCompactingKVs",
Long.valueOf(this.totalCompactingKVs));
sb = Strings.appendKeyValue(sb, "currentCompactedKVs",
Long.valueOf(this.currentCompactedKVs));
float compactionProgressPct = Float.NaN;
if( this.totalCompactingKVs > 0 ) {
compactionProgressPct = Float.valueOf(
(float)this.currentCompactedKVs / this.totalCompactingKVs);
}
sb = Strings.appendKeyValue(sb, "compactionProgressPct",
compactionProgressPct);
String coprocessors = Arrays.toString(getCoprocessors());
if (coprocessors != null) {
sb = Strings.appendKeyValue(sb, "coprocessors",
Arrays.toString(getCoprocessors()));
}
return sb.toString();
}
}
/*
* TODO: Other metrics that might be considered when the master is actually
* doing load balancing instead of merely trying to decide where to assign
* a region:
* <ul>
* <li># of CPUs, heap size (to determine the "class" of machine). For
* now, we consider them to be homogeneous.</li>
* <li>#requests per region (Map<{String|HRegionInfo}, Integer>)</li>
* <li>#compactions and/or #splits (churn)</li>
* <li>server death rate (maybe there is something wrong with this server)</li>
* </ul>
*/
/** default constructor (used by Writable) */
public HServerLoad() {
super();
}
/**
* Constructor
* @param numberOfRequests
* @param usedHeapMB
* @param maxHeapMB
* @param coprocessors : coprocessors loaded at the regionserver-level
*/
public HServerLoad(final int totalNumberOfRequests,
final int numberOfRequests, final int usedHeapMB, final int maxHeapMB,
final Map<byte[], RegionLoad> regionLoad,
final Set<String> coprocessors) {
this.numberOfRequests = numberOfRequests;
this.usedHeapMB = usedHeapMB;
this.maxHeapMB = maxHeapMB;
this.regionLoad = regionLoad;
this.totalNumberOfRequests = totalNumberOfRequests;
this.coprocessors = coprocessors;
}
/**
* Constructor
* @param hsl the template HServerLoad
*/
public HServerLoad(final HServerLoad hsl) {
this(hsl.totalNumberOfRequests, hsl.numberOfRequests, hsl.usedHeapMB,
hsl.maxHeapMB, hsl.getRegionsLoad(), hsl.coprocessors);
for (Map.Entry<byte[], RegionLoad> e : hsl.regionLoad.entrySet()) {
this.regionLoad.put(e.getKey(), e.getValue());
}
}
/**
* Originally, this method factored in the effect of requests going to the
* server as well. However, this does not interact very well with the current
* region rebalancing code, which only factors number of regions. For the
* interim, until we can figure out how to make rebalancing use all the info
* available, we're just going to make load purely the number of regions.
*
* @return load factor for this server
*/
public int getLoad() {
// int load = numberOfRequests == 0 ? 1 : numberOfRequests;
// load *= numberOfRegions == 0 ? 1 : numberOfRegions;
// return load;
return this.regionLoad.size();
}
/**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return toString(1);
}
/**
* Returns toString() with the number of requests divided by the message
* interval in seconds
* @param msgInterval
* @return The load as a String
*/
public String toString(int msgInterval) {
int numberOfRegions = this.regionLoad.size();
StringBuilder sb = new StringBuilder();
sb = Strings.appendKeyValue(sb, "requestsPerSecond",
Integer.valueOf(numberOfRequests/msgInterval));
sb = Strings.appendKeyValue(sb, "numberOfOnlineRegions",
Integer.valueOf(numberOfRegions));
sb = Strings.appendKeyValue(sb, "usedHeapMB",
Integer.valueOf(this.usedHeapMB));
sb = Strings.appendKeyValue(sb, "maxHeapMB", Integer.valueOf(maxHeapMB));
return sb.toString();
}
/**
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null) {
return false;
}
if (getClass() != o.getClass()) {
return false;
}
return compareTo((HServerLoad)o) == 0;
}
// Getters
/**
* @return the numberOfRegions
*/
public int getNumberOfRegions() {
return this.regionLoad.size();
}
/**
* @return the numberOfRequests per second.
*/
public int getNumberOfRequests() {
return numberOfRequests;
}
/**
* @return the total number of requests from the start of the region server
*/
public int getTotalNumberOfRequests() {
return totalNumberOfRequests;
}
/**
* @return the amount of heap in use, in MB
*/
public int getUsedHeapMB() {
return usedHeapMB;
}
/**
* @return the maximum allowable heap size, in MB
*/
public int getMaxHeapMB() {
return maxHeapMB;
}
/**
* @return region load metrics
*/
public Map<byte[], RegionLoad> getRegionsLoad() {
return Collections.unmodifiableMap(regionLoad);
}
/**
* @return Count of storefiles on this regionserver
*/
public int getStorefiles() {
int count = 0;
for (RegionLoad info: regionLoad.values())
count += info.getStorefiles();
return count;
}
/**
* @return Total size of store files in MB
*/
public int getStorefileSizeInMB() {
int count = 0;
for (RegionLoad info: regionLoad.values())
count += info.getStorefileSizeMB();
return count;
}
/**
* @return Size of memstores in MB
*/
public int getMemStoreSizeInMB() {
int count = 0;
for (RegionLoad info: regionLoad.values())
count += info.getMemStoreSizeMB();
return count;
}
/**
* @return Size of store file indexes in MB
*/
public int getStorefileIndexSizeInMB() {
int count = 0;
for (RegionLoad info: regionLoad.values())
count += info.getStorefileIndexSizeMB();
return count;
}
public static HServerLoad convert(ServerLoad sl) {
// TODO: This conversion of ServerLoad to HServerLoad is temporary,
// will be cleaned up in HBASE-5445. Using the ClusterStatus proto brings
// in a lot of other changes, so it makes sense to break this up.
Map<byte[],RegionLoad> regionLoad = new HashMap<byte[],RegionLoad>();
for (HBaseProtos.RegionLoad rl : sl.getRegionLoadsList()) {
Set<String> regionCoprocessors = new HashSet<String>();
for (HBaseProtos.Coprocessor coprocessor
: rl.getCoprocessorsList()) {
regionCoprocessors.add(coprocessor.getName());
}
byte [] regionName = rl.getRegionSpecifier().getValue().toByteArray();
RegionLoad converted = new RegionLoad(regionName,
rl.getStores(),rl.getStorefiles(),rl.getStoreUncompressedSizeMB(),
rl.getStorefileSizeMB(),rl.getMemstoreSizeMB(),
rl.getStorefileIndexSizeMB(),rl.getRootIndexSizeKB(),
rl.getTotalStaticIndexSizeKB(),rl.getTotalStaticBloomSizeKB(),
rl.getReadRequestsCount(),rl.getWriteRequestsCount(),
rl.getTotalCompactingKVs(),rl.getCurrentCompactedKVs(),
regionCoprocessors);
regionLoad.put(regionName, converted);
}
Set<String> coprocessors =
new HashSet<String>(Arrays.asList(ServerLoad.getRegionServerCoprocessors(sl)));
HServerLoad hsl = new HServerLoad(sl.getTotalNumberOfRequests(),
sl.getRequestsPerSecond(),sl.getUsedHeapMB(),sl.getMaxHeapMB(),
regionLoad,coprocessors);
return hsl;
}
// Writable
/**
* @deprecated Writables are going away.
*/
@Deprecated
public void readFields(DataInput in) throws IOException {
super.readFields(in);
int version = in.readByte();
if (version > VERSION) throw new IOException("Version mismatch; " + version);
numberOfRequests = in.readInt();
usedHeapMB = in.readInt();
maxHeapMB = in.readInt();
int numberOfRegions = in.readInt();
for (int i = 0; i < numberOfRegions; i++) {
RegionLoad rl = new RegionLoad();
rl.readFields(in);
regionLoad.put(rl.getName(), rl);
}
totalNumberOfRequests = in.readInt();
int coprocessorsSize = in.readInt();
for(int i = 0; i < coprocessorsSize; i++) {
coprocessors.add(in.readUTF());
}
}
/**
* @deprecated Writables are going away.
*/
@Deprecated
public void write(DataOutput out) throws IOException {
super.write(out);
out.writeByte(VERSION);
out.writeInt(numberOfRequests);
out.writeInt(usedHeapMB);
out.writeInt(maxHeapMB);
out.writeInt(this.regionLoad.size());
for (RegionLoad rl: regionLoad.values())
rl.write(out);
out.writeInt(totalNumberOfRequests);
out.writeInt(coprocessors.size());
for (String coprocessor: coprocessors) {
out.writeUTF(coprocessor);
}
}
// Comparable
public int compareTo(HServerLoad o) {
return this.getLoad() - o.getLoad();
}
}

View File

@@ -21,14 +21,18 @@
package org.apache.hadoop.hbase;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.TreeSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Strings;
/**
@@ -78,12 +82,12 @@ public class ServerLoad {
protected HBaseProtos.ServerLoad serverLoad;
/* @return number of requests per second since last report. */
public int getRequestsPerSecond() {
return serverLoad.getRequestsPerSecond();
/* @return number of requests since last report. */
public int getNumberOfRequests() {
return serverLoad.getNumberOfRequests();
}
public boolean hasRequestsPerSecond() {
return serverLoad.hasRequestsPerSecond();
public boolean hasNumberOfRequests() {
return serverLoad.hasNumberOfRequests();
}
/* @return total number of requests from the start of the region server. */
@@ -110,31 +114,6 @@ public class ServerLoad {
return serverLoad.hasMaxHeapMB();
}
/* Returns list of RegionLoads, which contain information on the load of individual regions. */
public List<RegionLoad> getRegionLoadsList() {
return serverLoad.getRegionLoadsList();
}
public RegionLoad getRegionLoads(int index) {
return serverLoad.getRegionLoads(index);
}
public int getRegionLoadsCount() {
return serverLoad.getRegionLoadsCount();
}
/**
* @return the list Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside the RegionLoad objects.
*/
public List<Coprocessor> getCoprocessorsList() {
return serverLoad.getCoprocessorsList();
}
public Coprocessor getCoprocessors(int index) {
return serverLoad.getCoprocessors(index);
}
public int getCoprocessorsCount() {
return serverLoad.getCoprocessorsCount();
}
public int getStores() {
return stores;
}
@@ -147,15 +126,15 @@ public class ServerLoad {
return storeUncompressedSizeMB;
}
public int getStorefileSizeMB() {
public int getStorefileSizeInMB() {
return storefileSizeMB;
}
public int getMemstoreSizeMB() {
public int getMemstoreSizeInMB() {
return memstoreSizeMB;
}
public int getStorefileIndexSizeMB() {
public int getStorefileIndexSizeInMB() {
return storefileIndexSizeMB;
}
@@ -188,16 +167,48 @@ public class ServerLoad {
}
/**
* Return the RegionServer-level coprocessors from a ServerLoad pb.
* @param sl - ServerLoad
* @return string array of loaded RegionServer-level coprocessors
* @return the number of regions
*/
public static String[] getRegionServerCoprocessors(ServerLoad sl) {
if (sl == null) {
return null;
public int getNumberOfRegions() {
return serverLoad.getRegionLoadsCount();
}
List<Coprocessor> list = sl.getCoprocessorsList();
/**
* Originally, this method factored in the effect of requests going to the
* server as well. However, this does not interact very well with the current
* region rebalancing code, which only factors number of regions. For the
* interim, until we can figure out how to make rebalancing use all the info
* available, we're just going to make load purely the number of regions.
*
* @return load factor for this server
*/
public int getLoad() {
// See above comment
// int load = numberOfRequests == 0 ? 1 : numberOfRequests;
// load *= numberOfRegions == 0 ? 1 : numberOfRegions;
// return load;
return getNumberOfRegions();
}
/**
* @return region load metrics
*/
public Map<byte[], RegionLoad> getRegionsLoad() {
Map<byte[], RegionLoad> regionLoads =
new TreeMap<byte[], RegionLoad>(Bytes.BYTES_COMPARATOR);
for (HBaseProtos.RegionLoad rl : serverLoad.getRegionLoadsList()) {
RegionLoad regionLoad = new RegionLoad(rl);
regionLoads.put(regionLoad.getName(), regionLoad);
}
return regionLoads;
}
/**
* Return the RegionServer-level coprocessors
* @return string array of loaded RegionServer-level coprocessors
*/
public String[] getRegionServerCoprocessors() {
List<Coprocessor> list = getServerLoadPB().getCoprocessorsList();
String [] ret = new String[list.size()];
int i = 0;
for (Coprocessor elem : list) {
@@ -209,23 +220,18 @@ public class ServerLoad {
/**
* Return the RegionServer-level and Region-level coprocessors
* from a ServerLoad pb.
* @param sl - ServerLoad
* @return string array of loaded RegionServer-level and
* Region-level coprocessors
*/
public static String[] getAllCoprocessors(ServerLoad sl) {
if (sl == null) {
return null;
}
public String[] getAllCoprocessors() {
// Need a set to remove duplicates, but since generated Coprocessor class
// is not Comparable, make it a Set<String> instead of Set<Coprocessor>
TreeSet<String> coprocessSet = new TreeSet<String>();
for (Coprocessor coprocessor : sl.getCoprocessorsList()) {
for (Coprocessor coprocessor : getServerLoadPB().getCoprocessorsList()) {
coprocessSet.add(coprocessor.getName());
}
for (RegionLoad rl : sl.getRegionLoadsList()) {
for (HBaseProtos.RegionLoad rl : getServerLoadPB().getRegionLoadsList()) {
for (Coprocessor coprocessor : rl.getCoprocessorsList()) {
coprocessSet.add(coprocessor.getName());
}
@@ -234,13 +240,30 @@ public class ServerLoad {
return coprocessSet.toArray(new String[0]);
}
/**
* @deprecated Use getAllCoprocessors instead
*/
public String[] getCoprocessors() {
return getAllCoprocessors();
}
/**
* @return number of requests per second received since the last report
*/
public double getRequestsPerSecond() {
long msgInterval = serverLoad.getReportEndTime() - serverLoad.getReportStartTime();
return (msgInterval==0)?0.0:(getNumberOfRequests()/(double)msgInterval);
}
/**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb =
Strings.appendKeyValue(new StringBuilder(), "requestsPerSecond",
Integer.valueOf(this.getRequestsPerSecond()));
Strings.appendKeyValue(sb, "numberOfOnlineRegions", Integer.valueOf(getRegionLoadsCount()));
Double.valueOf(getRequestsPerSecond()));
Strings.appendKeyValue(sb, "numberOfOnlineRegions", Integer.valueOf(getNumberOfRegions()));
sb = Strings.appendKeyValue(sb, "usedHeapMB", Integer.valueOf(this.getUsedHeapMB()));
sb = Strings.appendKeyValue(sb, "maxHeapMB", Integer.valueOf(getMaxHeapMB()));
sb = Strings.appendKeyValue(sb, "numberOfStores", Integer.valueOf(this.stores));
@@ -279,7 +302,7 @@ public class ServerLoad {
}
sb = Strings.appendKeyValue(sb, "compactionProgressPct", compactionProgressPct);
String[] coprocessorStrings = getAllCoprocessors(this);
String[] coprocessorStrings = getAllCoprocessors();
if (coprocessorStrings != null) {
sb = Strings.appendKeyValue(sb, "coprocessors", Arrays.toString(coprocessorStrings));
}
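Two API moves in this file read together: the coprocessor accessors become instance methods (previously statics that took a ServerLoad argument and returned null on null input), and the request rate is now computed from the report window instead of being stored. A short usage sketch, with sl a ServerLoad obtained as elsewhere in the patch:

String[] rsOnly = sl.getRegionServerCoprocessors();  // RegionServer-level only
String[] all = sl.getAllCoprocessors();              // union with region-level coprocessors
double rps = sl.getRequestsPerSecond();              // numberOfRequests over the report window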

View File

@@ -31,7 +31,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HServerLoad;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.ServerName;
@@ -76,7 +77,7 @@ public class AvroUtil {
return asa;
}
static public ARegionLoad hrlToARL(HServerLoad.RegionLoad rl) throws IOException {
static public ARegionLoad hrlToARL(RegionLoad rl) throws IOException {
ARegionLoad arl = new ARegionLoad();
arl.memStoreSizeMB = rl.getMemStoreSizeMB();
arl.name = ByteBuffer.wrap(rl.getName());
@@ -87,20 +88,20 @@ public class AvroUtil {
return arl;
}
static public AServerLoad hslToASL(HServerLoad hsl) throws IOException {
static public AServerLoad hslToASL(ServerLoad sl) throws IOException {
AServerLoad asl = new AServerLoad();
asl.load = hsl.getLoad();
asl.maxHeapMB = hsl.getMaxHeapMB();
asl.memStoreSizeInMB = hsl.getMemStoreSizeInMB();
asl.numberOfRegions = hsl.getNumberOfRegions();
asl.numberOfRequests = hsl.getNumberOfRequests();
asl.load = sl.getLoad();
asl.maxHeapMB = sl.getMaxHeapMB();
asl.memStoreSizeInMB = sl.getMemstoreSizeInMB();
asl.numberOfRegions = sl.getNumberOfRegions();
asl.numberOfRequests = sl.getNumberOfRequests();
Collection<HServerLoad.RegionLoad> regionLoads = hsl.getRegionsLoad().values();
Collection<RegionLoad> regionLoads = sl.getRegionsLoad().values();
Schema s = Schema.createArray(ARegionLoad.SCHEMA$);
GenericData.Array<ARegionLoad> aregionLoads = null;
if (regionLoads != null) {
aregionLoads = new GenericData.Array<ARegionLoad>(regionLoads.size(), s);
for (HServerLoad.RegionLoad rl : regionLoads) {
for (RegionLoad rl : regionLoads) {
aregionLoads.add(hrlToARL(rl));
}
} else {
@@ -108,17 +109,17 @@ public class AvroUtil {
}
asl.regionsLoad = aregionLoads;
asl.storefileIndexSizeInMB = hsl.getStorefileIndexSizeInMB();
asl.storefiles = hsl.getStorefiles();
asl.storefileSizeInMB = hsl.getStorefileSizeInMB();
asl.usedHeapMB = hsl.getUsedHeapMB();
asl.storefileIndexSizeInMB = sl.getStorefileIndexSizeInMB();
asl.storefiles = sl.getStorefiles();
asl.storefileSizeInMB = sl.getStorefileSizeInMB();
asl.usedHeapMB = sl.getUsedHeapMB();
return asl;
}
static public AServerInfo hsiToASI(ServerName sn, HServerLoad hsl) throws IOException {
static public AServerInfo hsiToASI(ServerName sn, ServerLoad sl) throws IOException {
AServerInfo asi = new AServerInfo();
asi.infoPort = -1;
asi.load = hslToASL(hsl);
asi.load = hslToASL(sl);
asi.serverAddress = hsaToASA(new HServerAddress(sn.getHostname(), sn.getPort()));
asi.serverName = new Utf8(sn.toString());
asi.startCode = sn.getStartcode();

View File

@@ -48,7 +48,6 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.HServerLoad;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Action;
@@ -253,7 +252,9 @@ public class HbaseObjectWritable implements Writable, WritableWithSize, Configur
addToMap(ColumnRangeFilter.class, code++);
addToMap(HServerLoad.class, code++);
// HServerLoad no longer exists; increase code so other classes stay the same.
code++;
//addToMap(HServerLoad.class, code++);
addToMap(RegionOpeningState.class, code++);

View File

@@ -304,7 +304,7 @@ public class ServerManager {
double averageLoad = 0.0;
for (ServerLoad sl: this.onlineServers.values()) {
numServers++;
totalLoad += sl.getRegionLoadsCount();
totalLoad += sl.getNumberOfRegions();
}
averageLoad = (double)totalLoad / (double)numServers;
return averageLoad;
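A worked example with invented counts: three online servers holding 10, 20 and 30 regions give totalLoad = 60 and numServers = 3, so the method returns 60 / 3 = 20.0. With zero online servers the all-double division yields NaN rather than throwing.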

View File

@@ -25,8 +25,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerLoad;
import org.apache.hadoop.hbase.HServerLoad.RegionLoad;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionPlan;
@@ -316,9 +316,9 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
private void updateRegionLoad() {
loads.clear();
for (ServerName sn : clusterStatus.getServers()) {
HServerLoad hsl = clusterStatus.getLoad(sn);
if (hsl == null) continue;
for (Entry<byte[], RegionLoad> entry : hsl.getRegionsLoad().entrySet()) {
ServerLoad sl = clusterStatus.getLoad(sn);
if (sl == null) continue;
for (Entry<byte[], RegionLoad> entry : sl.getRegionsLoad().entrySet()) {
loads.put(Bytes.toString(entry.getKey()), entry.getValue());
}

View File

@@ -5521,9 +5521,9 @@ public final class HBaseProtos {
public interface ServerLoadOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// optional uint32 requestsPerSecond = 1;
boolean hasRequestsPerSecond();
int getRequestsPerSecond();
// optional uint32 numberOfRequests = 1;
boolean hasNumberOfRequests();
int getNumberOfRequests();
// optional uint32 totalNumberOfRequests = 2;
boolean hasTotalNumberOfRequests();
@@ -5556,6 +5564,14 @@
getCoprocessorsOrBuilderList();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder(
int index);
// optional uint64 reportStartTime = 7;
boolean hasReportStartTime();
long getReportStartTime();
// optional uint64 reportEndTime = 8;
boolean hasReportEndTime();
long getReportEndTime();
}
public static final class ServerLoad extends
com.google.protobuf.GeneratedMessage
@@ -5586,14 +5594,14 @@
}
private int bitField0_;
// optional uint32 requestsPerSecond = 1;
public static final int REQUESTSPERSECOND_FIELD_NUMBER = 1;
private int requestsPerSecond_;
public boolean hasRequestsPerSecond() {
// optional uint32 numberOfRequests = 1;
public static final int NUMBEROFREQUESTS_FIELD_NUMBER = 1;
private int numberOfRequests_;
public boolean hasNumberOfRequests() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public int getRequestsPerSecond() {
return requestsPerSecond_;
public int getNumberOfRequests() {
return numberOfRequests_;
}
// optional uint32 totalNumberOfRequests = 2;
@@ -5668,13 +5676,35 @@
return coprocessors_.get(index);
}
// optional uint64 reportStartTime = 7;
public static final int REPORTSTARTTIME_FIELD_NUMBER = 7;
private long reportStartTime_;
public boolean hasReportStartTime() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
public long getReportStartTime() {
return reportStartTime_;
}
// optional uint64 reportEndTime = 8;
public static final int REPORTENDTIME_FIELD_NUMBER = 8;
private long reportEndTime_;
public boolean hasReportEndTime() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
public long getReportEndTime() {
return reportEndTime_;
}
private void initFields() {
requestsPerSecond_ = 0;
numberOfRequests_ = 0;
totalNumberOfRequests_ = 0;
usedHeapMB_ = 0;
maxHeapMB_ = 0;
regionLoads_ = java.util.Collections.emptyList();
coprocessors_ = java.util.Collections.emptyList();
reportStartTime_ = 0L;
reportEndTime_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -5701,7 +5731,7 @@
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt32(1, requestsPerSecond_);
output.writeUInt32(1, numberOfRequests_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt32(2, totalNumberOfRequests_);
@@ -5718,6 +5748,12 @@
for (int i = 0; i < coprocessors_.size(); i++) {
output.writeMessage(6, coprocessors_.get(i));
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeUInt64(7, reportStartTime_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeUInt64(8, reportEndTime_);
}
getUnknownFields().writeTo(output);
}
@@ -5729,7 +5765,7 @@
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(1, requestsPerSecond_);
.computeUInt32Size(1, numberOfRequests_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
@@ -5751,6 +5787,14 @@
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(6, coprocessors_.get(i));
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(7, reportStartTime_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(8, reportEndTime_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -5774,10 +5818,10 @@
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad) obj;
boolean result = true;
result = result && (hasRequestsPerSecond() == other.hasRequestsPerSecond());
if (hasRequestsPerSecond()) {
result = result && (getRequestsPerSecond()
== other.getRequestsPerSecond());
result = result && (hasNumberOfRequests() == other.hasNumberOfRequests());
if (hasNumberOfRequests()) {
result = result && (getNumberOfRequests()
== other.getNumberOfRequests());
}
result = result && (hasTotalNumberOfRequests() == other.hasTotalNumberOfRequests());
if (hasTotalNumberOfRequests()) {
@@ -5798,6 +5842,16 @@
.equals(other.getRegionLoadsList());
result = result && getCoprocessorsList()
.equals(other.getCoprocessorsList());
result = result && (hasReportStartTime() == other.hasReportStartTime());
if (hasReportStartTime()) {
result = result && (getReportStartTime()
== other.getReportStartTime());
}
result = result && (hasReportEndTime() == other.hasReportEndTime());
if (hasReportEndTime()) {
result = result && (getReportEndTime()
== other.getReportEndTime());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -5807,9 +5861,9 @@
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRequestsPerSecond()) {
hash = (37 * hash) + REQUESTSPERSECOND_FIELD_NUMBER;
hash = (53 * hash) + getRequestsPerSecond();
if (hasNumberOfRequests()) {
hash = (37 * hash) + NUMBEROFREQUESTS_FIELD_NUMBER;
hash = (53 * hash) + getNumberOfRequests();
}
if (hasTotalNumberOfRequests()) {
hash = (37 * hash) + TOTALNUMBEROFREQUESTS_FIELD_NUMBER;
@@ -5831,6 +5885,14 @@
hash = (37 * hash) + COPROCESSORS_FIELD_NUMBER;
hash = (53 * hash) + getCoprocessorsList().hashCode();
}
if (hasReportStartTime()) {
hash = (37 * hash) + REPORTSTARTTIME_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getReportStartTime());
}
if (hasReportEndTime()) {
hash = (37 * hash) + REPORTENDTIME_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getReportEndTime());
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
@@ -5949,7 +6011,7 @@
public Builder clear() {
super.clear();
requestsPerSecond_ = 0;
numberOfRequests_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
totalNumberOfRequests_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
@@ -5969,6 +6031,10 @@
} else {
coprocessorsBuilder_.clear();
}
reportStartTime_ = 0L;
bitField0_ = (bitField0_ & ~0x00000040);
reportEndTime_ = 0L;
bitField0_ = (bitField0_ & ~0x00000080);
return this;
}
@@ -6010,7 +6076,7 @@
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.requestsPerSecond_ = requestsPerSecond_;
result.numberOfRequests_ = numberOfRequests_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
@@ -6041,6 +6107,14 @@
} else {
result.coprocessors_ = coprocessorsBuilder_.build();
}
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000010;
}
result.reportStartTime_ = reportStartTime_;
if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
to_bitField0_ |= 0x00000020;
}
result.reportEndTime_ = reportEndTime_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -6057,8 +6131,8 @@
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.getDefaultInstance()) return this;
if (other.hasRequestsPerSecond()) {
setRequestsPerSecond(other.getRequestsPerSecond());
if (other.hasNumberOfRequests()) {
setNumberOfRequests(other.getNumberOfRequests());
}
if (other.hasTotalNumberOfRequests()) {
setTotalNumberOfRequests(other.getTotalNumberOfRequests());
@@ -6121,6 +6195,12 @@
}
}
}
if (other.hasReportStartTime()) {
setReportStartTime(other.getReportStartTime());
}
if (other.hasReportEndTime()) {
setReportEndTime(other.getReportEndTime());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -6166,7 +6246,7 @@
}
case 8: {
bitField0_ |= 0x00000001;
requestsPerSecond_ = input.readUInt32();
numberOfRequests_ = input.readUInt32();
break;
}
case 16: {
@@ -6196,29 +6276,39 @@
addCoprocessors(subBuilder.buildPartial());
break;
}
case 56: {
bitField0_ |= 0x00000040;
reportStartTime_ = input.readUInt64();
break;
}
case 64: {
bitField0_ |= 0x00000080;
reportEndTime_ = input.readUInt64();
break;
}
}
}
}
private int bitField0_;
// optional uint32 requestsPerSecond = 1;
private int requestsPerSecond_ ;
public boolean hasRequestsPerSecond() {
// optional uint32 numberOfRequests = 1;
private int numberOfRequests_ ;
public boolean hasNumberOfRequests() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public int getRequestsPerSecond() {
return requestsPerSecond_;
public int getNumberOfRequests() {
return numberOfRequests_;
}
public Builder setRequestsPerSecond(int value) {
public Builder setNumberOfRequests(int value) {
bitField0_ |= 0x00000001;
requestsPerSecond_ = value;
numberOfRequests_ = value;
onChanged();
return this;
}
public Builder clearRequestsPerSecond() {
public Builder clearNumberOfRequests() {
bitField0_ = (bitField0_ & ~0x00000001);
requestsPerSecond_ = 0;
numberOfRequests_ = 0;
onChanged();
return this;
}
@@ -6658,6 +6748,48 @@
return coprocessorsBuilder_;
}
// optional uint64 reportStartTime = 7;
private long reportStartTime_ ;
public boolean hasReportStartTime() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
public long getReportStartTime() {
return reportStartTime_;
}
public Builder setReportStartTime(long value) {
bitField0_ |= 0x00000040;
reportStartTime_ = value;
onChanged();
return this;
}
public Builder clearReportStartTime() {
bitField0_ = (bitField0_ & ~0x00000040);
reportStartTime_ = 0L;
onChanged();
return this;
}
// optional uint64 reportEndTime = 8;
private long reportEndTime_ ;
public boolean hasReportEndTime() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
public long getReportEndTime() {
return reportEndTime_;
}
public Builder setReportEndTime(long value) {
bitField0_ |= 0x00000080;
reportEndTime_ = value;
onChanged();
return this;
}
public Builder clearReportEndTime() {
bitField0_ = (bitField0_ & ~0x00000080);
reportEndTime_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:ServerLoad)
}
@@ -9932,24 +10064,25 @@
"dKVs\030\013 \001(\004\022\027\n\017rootIndexSizeKB\030\014 \001(\r\022\036\n\026t" +
"otalStaticIndexSizeKB\030\r \001(\r\022\036\n\026totalStat" +
"icBloomSizeKB\030\016 \001(\r\022\"\n\014coprocessors\030\017 \003(" +
"\0132\014.Coprocessor\"\263\001\n\nServerLoad\022\031\n\021reques" +
"tsPerSecond\030\001 \001(\r\022\035\n\025totalNumberOfReques" +
"ts\030\002 \001(\r\022\022\n\nusedHeapMB\030\003 \001(\r\022\021\n\tmaxHeapM" +
"B\030\004 \001(\r\022 \n\013regionLoads\030\005 \003(\0132\013.RegionLoa" +
"d\022\"\n\014coprocessors\030\006 \003(\0132\014.Coprocessor\"%\n",
"\tTimeRange\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001(\004\"w\n\010" +
"KeyValue\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002 \002(\014\022\021\n" +
"\tqualifier\030\003 \002(\014\022\021\n\ttimestamp\030\004 \001(\004\022\031\n\007k" +
"eyType\030\005 \001(\0162\010.KeyType\022\r\n\005value\030\006 \001(\014\"?\n" +
"\nServerName\022\020\n\010hostName\030\001 \002(\t\022\014\n\004port\030\002 " +
"\001(\r\022\021\n\tstartCode\030\003 \001(\004\"\033\n\013Coprocessor\022\014\n" +
"\004name\030\001 \002(\t\"-\n\016NameStringPair\022\014\n\004name\030\001 " +
"\002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNameBytesPair\022\014\n\004n" +
"ame\030\001 \002(\t\022\r\n\005value\030\002 \001(\014*_\n\007KeyType\022\013\n\007M" +
"INIMUM\020\000\022\007\n\003PUT\020\004\022\n\n\006DELETE\020\010\022\021\n\rDELETE_",
"COLUMN\020\014\022\021\n\rDELETE_FAMILY\020\016\022\014\n\007MAXIMUM\020\377" +
"\001B>\n*org.apache.hadoop.hbase.protobuf.ge" +
"neratedB\013HBaseProtosH\001\240\001\001"
"\0132\014.Coprocessor\"\342\001\n\nServerLoad\022\030\n\020number" +
"OfRequests\030\001 \001(\r\022\035\n\025totalNumberOfRequest" +
"s\030\002 \001(\r\022\022\n\nusedHeapMB\030\003 \001(\r\022\021\n\tmaxHeapMB" +
"\030\004 \001(\r\022 \n\013regionLoads\030\005 \003(\0132\013.RegionLoad" +
"\022\"\n\014coprocessors\030\006 \003(\0132\014.Coprocessor\022\027\n\017",
"reportStartTime\030\007 \001(\004\022\025\n\rreportEndTime\030\010" +
" \001(\004\"%\n\tTimeRange\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 " +
"\001(\004\"w\n\010KeyValue\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002" +
" \002(\014\022\021\n\tqualifier\030\003 \002(\014\022\021\n\ttimestamp\030\004 \001" +
"(\004\022\031\n\007keyType\030\005 \001(\0162\010.KeyType\022\r\n\005value\030\006" +
" \001(\014\"?\n\nServerName\022\020\n\010hostName\030\001 \002(\t\022\014\n\004" +
"port\030\002 \001(\r\022\021\n\tstartCode\030\003 \001(\004\"\033\n\013Coproce" +
"ssor\022\014\n\004name\030\001 \002(\t\"-\n\016NameStringPair\022\014\n\004" +
"name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNameBytesPa" +
"ir\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014*_\n\007KeyTy",
"pe\022\013\n\007MINIMUM\020\000\022\007\n\003PUT\020\004\022\n\n\006DELETE\020\010\022\021\n\r" +
"DELETE_COLUMN\020\014\022\021\n\rDELETE_FAMILY\020\016\022\014\n\007MA" +
"XIMUM\020\377\001B>\n*org.apache.hadoop.hbase.prot" +
"obuf.generatedB\013HBaseProtosH\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -10017,7 +10150,7 @@
internal_static_ServerLoad_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ServerLoad_descriptor,
new java.lang.String[] { "RequestsPerSecond", "TotalNumberOfRequests", "UsedHeapMB", "MaxHeapMB", "RegionLoads", "Coprocessors", },
new java.lang.String[] { "NumberOfRequests", "TotalNumberOfRequests", "UsedHeapMB", "MaxHeapMB", "RegionLoads", "Coprocessors", "ReportStartTime", "ReportEndTime", },
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder.class);
internal_static_TimeRange_descriptor =

View File

@@ -781,7 +781,7 @@ public class HRegionServer implements ClientProtocol,
long now = System.currentTimeMillis();
if ((now - lastMsg) >= msgInterval) {
doMetrics();
tryRegionServerReport();
tryRegionServerReport(lastMsg, now);
lastMsg = System.currentTimeMillis();
}
if (!this.stopped) this.sleeper.sleep();
@@ -887,9 +887,9 @@ public class HRegionServer implements ClientProtocol,
return allUserRegionsOffline;
}
void tryRegionServerReport()
void tryRegionServerReport(long reportStartTime, long reportEndTime)
throws IOException {
HBaseProtos.ServerLoad sl = buildServerLoad();
HBaseProtos.ServerLoad sl = buildServerLoad(reportStartTime, reportEndTime);
// Why do we do this?
this.requestCount.set(0);
try {
@@ -911,13 +911,13 @@ public class HRegionServer implements ClientProtocol,
}
}
HBaseProtos.ServerLoad buildServerLoad() {
HBaseProtos.ServerLoad buildServerLoad(long reportStartTime, long reportEndTime) {
Collection<HRegion> regions = getOnlineRegionsLocalContext();
MemoryUsage memory =
ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
HBaseProtos.ServerLoad.Builder serverLoad = HBaseProtos.ServerLoad.newBuilder();
serverLoad.setRequestsPerSecond((int)metrics.getRequests());
serverLoad.setNumberOfRequests((int)metrics.getRequests());
serverLoad.setTotalNumberOfRequests(requestCount.get());
serverLoad.setUsedHeapMB((int)(memory.getUsed() / 1024 / 1024));
serverLoad.setMaxHeapMB((int) (memory.getMax() / 1024 / 1024));
@@ -929,6 +929,8 @@ public class HRegionServer implements ClientProtocol,
for (HRegion region : regions) {
serverLoad.addRegionLoads(createRegionLoad(region));
}
serverLoad.setReportStartTime(reportStartTime);
serverLoad.setReportEndTime(reportEndTime);
return serverLoad.build();
}
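The two timestamps threaded from the run loop into buildServerLoad bracket one heartbeat interval, which is what lets numberOfRequests (a per-interval count) be read against wall-clock time. A sketch of the call shape, mirroring the loop above (epoch milliseconds):

long reportStart = lastMsg;                    // previous heartbeat
long reportEnd = System.currentTimeMillis();   // this heartbeat
HBaseProtos.ServerLoad sl = buildServerLoad(reportStart, reportEnd);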
@@ -2313,9 +2315,10 @@ public class HRegionServer implements ClientProtocol,
// used by org/apache/hbase/tmpl/regionserver/RSStatusTmpl.jamon (HBASE-4070).
public String[] getCoprocessors() {
HBaseProtos.ServerLoad sl = buildServerLoad();
// passing fake times to buildServerLoad is okay, because we only care about the coprocessor part.
HBaseProtos.ServerLoad sl = buildServerLoad(0, 0);
return sl == null? null:
ServerLoad.getRegionServerCoprocessors(new ServerLoad(sl));
new ServerLoad(sl).getRegionServerCoprocessors();
}
/**

View File

@@ -36,7 +36,8 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HServerLoad;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;
@@ -76,7 +77,7 @@ public class StorageClusterStatusResource extends ResourceBase {
model.setRequests(status.getRequestsCount());
model.setAverageLoad(status.getAverageLoad());
for (ServerName info: status.getServers()) {
HServerLoad load = status.getLoad(info);
ServerLoad load = status.getLoad(info);
StorageClusterStatusModel.Node node =
model.addLiveNode(
info.getHostname() + ":" +
@@ -84,7 +85,7 @@ public class StorageClusterStatusResource extends ResourceBase {
info.getStartcode(), load.getUsedHeapMB(),
load.getMaxHeapMB());
node.setRequests(load.getNumberOfRequests());
for (HServerLoad.RegionLoad region: load.getRegionsLoad().values()) {
for (RegionLoad region: load.getRegionsLoad().values()) {
node.addRegion(region.getName(), region.getStores(),
region.getStorefiles(), region.getStorefileSizeMB(),
region.getMemStoreSizeMB(), region.getStorefileIndexSizeMB(),

View File

@@ -137,8 +137,8 @@ message RegionLoad {
/* Server-level protobufs */
message ServerLoad {
/** Number of requests per second since last report. */
optional uint32 requestsPerSecond = 1;
/** Number of requests since last report. */
optional uint32 numberOfRequests = 1;
/** Total number of requests from the start of the region server. */
optional uint32 totalNumberOfRequests = 2;
@@ -158,6 +158,20 @@ message ServerLoad {
* objects.
*/
repeated Coprocessor coprocessors = 6;
/**
* Time when incremental (non-total) counts began being calculated
* (e.g. numberOfRequests). The time is measured in milliseconds since
* midnight, January 1, 1970 UTC.
*/
optional uint64 reportStartTime = 7;
/**
* Time when the report was generated, measured in milliseconds since
* midnight, January 1, 1970 UTC.
*/
optional uint64 reportEndTime = 8;
}
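Given these two fields, a consumer of the raw proto can recover the same rate that the new ServerLoad.getRequestsPerSecond() computes. A minimal sketch (a hypothetical helper, not part of the patch; the window is in milliseconds, as documented above):

static double requestRate(HBaseProtos.ServerLoad pb) {
  long window = pb.getReportEndTime() - pb.getReportStartTime();
  // Guard the zero-length window, as ServerLoad.getRequestsPerSecond() does.
  return window == 0 ? 0.0 : pb.getNumberOfRequests() / (double) window;
}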
/**

View File

@@ -26,7 +26,7 @@
import="org.apache.hadoop.hbase.HRegionInfo"
import="org.apache.hadoop.hbase.ServerName"
import="org.apache.hadoop.hbase.ServerLoad"
import="org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad"
import="org.apache.hadoop.hbase.RegionLoad"
import="org.apache.hadoop.hbase.master.HMaster"
import="org.apache.hadoop.hbase.util.Bytes"
import="org.apache.hadoop.hbase.util.FSUtils"
@@ -269,13 +269,9 @@
if (addr != null) {
ServerLoad sl = master.getServerManager().getLoad(addr);
if (sl != null) {
List<RegionLoad> list = sl.getRegionLoadsList();
byte [] regionName = regionInfo.getRegionName();
for (RegionLoad rgLoad : list) {
if (rgLoad.getRegionSpecifier().getValue().toByteArray().equals(regionName)) {
req = ProtobufUtil.getTotalRequestsCount(rgLoad);
break;
}
Map<byte[], RegionLoad> map = sl.getRegionsLoad();
if (map.containsKey(regionInfo.getRegionName())) {
req = map.get(regionInfo.getRegionName()).getRequestsCount();
}
// This port might be wrong if RS actually ended up using something else.
urlRegionServer =

View File

@@ -66,18 +66,6 @@ public class TestSerialization {
assertTrue(slt.equals(sltDeserialized));
}
@Test
public void testHServerLoadVersioning() throws IOException {
Set<String> cps = new HashSet<String>(0);
Map<byte [], RegionLoad> regions = new TreeMap<byte [], RegionLoad>(Bytes.BYTES_COMPARATOR);
regions.put(HConstants.META_TABLE_NAME,
new HServerLoad092.RegionLoad(HConstants.META_TABLE_NAME, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, cps));
HServerLoad092 hsl092 = new HServerLoad092(0, 0, 0, 0, regions, cps);
byte [] hsl092bytes = Writables.getBytes(hsl092);
Writables.getWritable(hsl092bytes, new HServerLoad());
// TO BE CONTINUED
}
@Test public void testCompareFilter() throws Exception {
Filter f = new RowFilter(CompareOp.EQUAL,
new BinaryComparator(Bytes.toBytes("testRowOne-2")));

View File

@@ -38,8 +38,8 @@ public class TestServerLoad {
assertEquals(114, sl.getStorefiles());
assertEquals(129, sl.getStoreUncompressedSizeMB());
assertEquals(504, sl.getRootIndexSizeKB());
assertEquals(820, sl.getStorefileSizeMB());
assertEquals(82, sl.getStorefileIndexSizeMB());
assertEquals(820, sl.getStorefileSizeInMB());
assertEquals(82, sl.getStorefileIndexSizeInMB());
assertEquals(0, sl.getReadRequestsCount());
}

View File

@@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
import org.apache.hadoop.hbase.RegionLoad;
import javax.tools.*;
import java.io.*;
@@ -571,9 +571,10 @@ public class TestClassLoading {
for(Map.Entry<ServerName,ServerLoad> server:
TEST_UTIL.getMiniHBaseCluster().getMaster().getServerManager().
getOnlineServers().entrySet()) {
for (RegionLoad region : server.getValue().getRegionLoadsList()) {
if (Bytes.toString(region.getRegionSpecifier().getValue().toByteArray()).equals(tableName)) {
// this server server hosts a region of tableName: add this server..
for( Map.Entry<byte[], RegionLoad> region:
server.getValue().getRegionsLoad().entrySet()) {
if (region.getValue().getNameAsString().equals(tableName)) {
// this server hosts a region of tableName: add this server..
serverLoadHashMap.put(server.getKey(),server.getValue());
// .. and skip the rest of the regions that it hosts.
break;
@@ -599,8 +600,7 @@
}
boolean any_failed = false;
for(Map.Entry<ServerName,ServerLoad> server: servers.entrySet()) {
actualCoprocessors =
ServerLoad.getAllCoprocessors(server.getValue());
actualCoprocessors = server.getValue().getAllCoprocessors();
if (!Arrays.equals(actualCoprocessors, expectedCoprocessors)) {
LOG.debug("failed comparison: actual: " +
Arrays.toString(actualCoprocessors) +

View File

@@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.HServerLoad;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.SmallTests;
@@ -524,7 +523,6 @@ public class TestHbaseObjectWritable extends TestCase {
assertEquals(72,HbaseObjectWritable.getClassCode(RandomRowFilter.class).intValue());
assertEquals(73,HbaseObjectWritable.getClassCode(CompareOp.class).intValue());
assertEquals(74,HbaseObjectWritable.getClassCode(ColumnRangeFilter.class).intValue());
assertEquals(75,HbaseObjectWritable.getClassCode(HServerLoad.class).intValue());
assertEquals(76,HbaseObjectWritable.getClassCode(RegionOpeningState.class).intValue());
assertEquals(77,HbaseObjectWritable.getClassCode(HTableDescriptor[].class).intValue());
assertEquals(78,HbaseObjectWritable.getClassCode(Append.class).intValue());

View File

@@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.DeserializationException;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerLoad;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;