HBASE-1665 expose more load information to the client side

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@794902 13f79535-47bb-0310-9956-ffa450edef68

parent 8439231fe7
commit 53b3084c85

 CHANGES.txt | 24
@@ -102,8 +102,8 @@ Release 0.20.0 - Unreleased
    HBASE-1292  php thrift's getRow() would throw an exception if the row does
                not exist (Rong-en Fan via Stack)
    HBASE-1340  Fix new javadoc warnings (Evgeny Ryabitskiy via Stack)
-   HBASE-1287  Partitioner class not used in TableMapReduceUtil.initTableReduceJob()
-               (Lars George and Billy Pearson via Stack)
+   HBASE-1287  Partitioner class not used in TableMapReduceUtil
+               .initTableReduceJob() (Lars George and Billy Pearson via Stack)
    HBASE-1320  hbase-1234 broke filter tests
    HBASE-1355  [performance] Cache family maxversions; we were calculating on
                each access
@@ -238,16 +238,17 @@ Release 0.20.0 - Unreleased
    HBASE-1437  broken links in hbase.org
    HBASE-1582  Translate ColumnValueFilter and RowFilterSet to the new Filter
                interface
-   HBASE-1594  Fix scan addcolumns after hbase-1385 commit (broken hudson build)
+   HBASE-1594  Fix scan addcolumns after hbase-1385 commit (broke hudson build)
    HBASE-1595  hadoop-default.xml and zoo.cfg in hbase jar
    HBASE-1602  HRegionServer won't go down since we added in new LruBlockCache
    HBASE-1608  TestCachedBlockQueue failing on some jvms (Jon Gray via Stack)
    HBASE-1615  HBASE-1597 introduced a bug when compacting after a split
                (Jon Gray via Stack)
-   HBASE-1616  Unit test of compacting referenced StoreFiles (Jon Gray via Stack)
+   HBASE-1616  Unit test of compacting referenced StoreFiles (Jon Gray via
+               Stack)
    HBASE-1618  Investigate further into the MemStoreFlusher StoreFile limit
                (Jon Gray via Stack)
-   HBASE-1625  Adding check to Put.add(KeyValue kv), to see that it has the same
+   HBASE-1625  Adding check to Put.add(KeyValue), to see that it has the same
                row as when instantiated (Erik Holstad via Stack)
    HBASE-1629  HRS unable to contact master
    HBASE-1633  Can't delete in TRUNK shell; makes it hard doing admin repairs
@@ -453,7 +454,8 @@ Release 0.20.0 - Unreleased
                (Erik Holstad via Stack)
    HBASE-1584  Put add methods should return this for ease of use (Be
                consistant with Get) (Clint Morgan via Stack)
-   HBASE-1581  Run major compaction on .META. when table is dropped or truncated
+   HBASE-1581  Run major compaction on .META. when table is dropped or
+               truncated
    HBASE-1587  Update ganglia config and doc to account for ganglia 3.1 and
                hadoop-4675
    HBASE-1589  Up zk maxClientCnxns from default of 10 to 20 or 30 or so
@@ -469,23 +471,25 @@ Release 0.20.0 - Unreleased
    HBASE-1218  Implement in-memory column (Jon Gray via Stack)
    HBASE-1606  Remove zoo.cfg, put config options into hbase-site.xml
    HBASE-1575  HMaster does not handle ZK session expiration
-   HBASE-1620  Need to use special StoreScanner constructor for major compactions
-               (passed sf, no caching, etc) (Jon Gray via Stack)
+   HBASE-1620  Need to use special StoreScanner constructor for major
+               compactions (passed sf, no caching, etc) (Jon Gray via Stack)
    HBASE-1624  Don't sort Puts if only one in list in HCM#processBatchOfRows
    HBASE-1626  Allow emitting Deletes out of new TableReducer
                (Lars George via Stack)
    HBASE-1551  HBase should manage multiple node ZooKeeper quorum
    HBASE-1637  Delete client class methods should return itself like Put, Get,
                Scan (Jon Gray via Nitay)
-   HBASE-1640  Allow passing arguments to jruby script run when run by bin/hbase shell
+   HBASE-1640  Allow passing arguments to jruby script run when run by hbase
+               shell
    HBASE-698   HLog recovery is not performed after master failure
    HBASE-1643  ScanDeleteTracker takes comparator but it unused
    HBASE-1603  MR failed "RetriesExhaustedException: Trying to contact region
                server Some server for region TestTable..." -- deubugging
    HBASE-1470  hbase and HADOOP-4379, dhruba's flush/sync
-   HBASE-1632  Write documentation for configuring/managing ZooKeeper with HBase
+   HBASE-1632  Write documentation for configuring/managing ZooKeeper
    HBASE-1662  Tool to run major compaction on catalog regions when hbase is
                shutdown
+   HBASE-1665  expose more load information to the client side

    OPTIMIZATIONS
    HBASE-1412  Change values for delete column and column family in KeyValue
@@ -70,15 +70,18 @@ public class StorageClusterStatusResource implements Constants {
     model.setRequests(status.getRequestsCount());
     model.setAverageLoad(status.getAverageLoad());
     for (HServerInfo info: status.getServerInfo()) {
+      HServerLoad load = info.getLoad();
       StorageClusterStatusModel.Node node =
         model.addLiveNode(
           info.getServerAddress().getHostname() + ":" +
             Integer.toString(info.getServerAddress().getPort()),
-          info.getStartCode());
-      HServerLoad load = info.getLoad();
+          info.getStartCode(), load.getUsedHeapMB(),
+          load.getMaxHeapMB());
       node.setRequests(load.getNumberOfRequests());
       for (HServerLoad.RegionLoad region: load.getRegionsLoad()) {
-        node.addRegion(region.getName());
+        node.addRegion(region.getName(), region.getStores(),
+          region.getStorefiles(), region.getStorefileSizeMB(),
+          region.getMemStoreSizeMB(), region.getStorefileIndexSizeMB());
       }
     }
     for (String name: status.getDeadServerNames()) {
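For reference, a minimal sketch (not part of this commit) of driving the enriched model by hand, using only the addLiveNode/addRegion/setRequests signatures introduced above; the server name, start code, and load figures are made-up values, and the no-arg model constructor is assumed:

  // Hypothetical values; only the model API shown in this change is used.
  StorageClusterStatusModel model = new StorageClusterStatusModel();
  StorageClusterStatusModel.Node node =
    model.addLiveNode("regionserver1.example.com:60030", 1247000000000L,
      128 /* heapSizeMB */, 1024 /* maxHeapSizeMB */);
  node.setRequests(42);
  node.addRegion(Bytes.toBytes("mytable,,1247000000000"), 1 /* stores */,
    2 /* storefiles */, 64 /* storefileSizeMB */, 16 /* memstoreSizeMB */,
    1 /* storefileIndexSizeMB */);
  System.out.println(model);  // toString() (see below) renders the new fields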
@@ -71,10 +71,17 @@ import com.google.protobuf.ByteString;
  *   <attribute name="name" type="string"></attribute>
  *   <attribute name="startCode" type="int"></attribute>
  *   <attribute name="requests" type="int"></attribute>
+ *   <attribute name="heapSizeMB" type="int"></attribute>
+ *   <attribute name="maxHeapSizeMB" type="int"></attribute>
  * </complexType>
  *
  * <complexType name="Region">
  *   <attribute name="name" type="base64Binary"></attribute>
+ *   <attribute name="stores" type="int"></attribute>
+ *   <attribute name="storefiles" type="int"></attribute>
+ *   <attribute name="storefileSizeMB" type="int"></attribute>
+ *   <attribute name="memstoreSizeMB" type="int"></attribute>
+ *   <attribute name="storefileIndexSizeMB" type="int"></attribute>
  * </complexType>
  * </pre>
  */
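The attributes above surface in the XML representation through the JAXB annotations on the model. A brief sketch of marshalling, assuming JAXB is on the classpath, the model class carries a root-element annotation (as the @XmlAttribute markers below suggest), and `model` is a populated StorageClusterStatusModel:

  import javax.xml.bind.JAXBContext;

  JAXBContext ctx = JAXBContext.newInstance(StorageClusterStatusModel.class);
  // Emits XML whose Node/Region elements carry the new load attributes.
  ctx.createMarshaller().marshal(model, System.out);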
@@ -93,6 +100,11 @@ public class StorageClusterStatusModel
    */
   public static class Region {
     private byte[] name;
+    private int stores;
+    private int storefiles;
+    private int storefileSizeMB;
+    private int memstoreSizeMB;
+    private int storefileIndexSizeMB;

     /**
      * Default constructor
@@ -107,6 +119,25 @@ public class StorageClusterStatusModel
       this.name = name;
     }

+    /**
+     * Constructor
+     * @param name the region name
+     * @param stores the number of stores
+     * @param storefiles the number of store files
+     * @param storefileSizeMB total size of store files, in MB
+     * @param memstoreSizeMB total size of memstore, in MB
+     * @param storefileIndexSizeMB total size of store file indexes, in MB
+     */
+    public Region(byte[] name, int stores, int storefiles,
+        int storefileSizeMB, int memstoreSizeMB, int storefileIndexSizeMB) {
+      this.name = name;
+      this.stores = stores;
+      this.storefiles = storefiles;
+      this.storefileSizeMB = storefileSizeMB;
+      this.memstoreSizeMB = memstoreSizeMB;
+      this.storefileIndexSizeMB = storefileIndexSizeMB;
+    }
+
     /**
      * @return the region name
      */
@@ -115,25 +146,104 @@ public class StorageClusterStatusModel
       return name;
     }

+    /**
+     * @return the number of stores
+     */
+    @XmlAttribute
+    public int getStores() {
+      return stores;
+    }
+
+    /**
+     * @return the number of store files
+     */
+    @XmlAttribute
+    public int getStorefiles() {
+      return storefiles;
+    }
+
+    /**
+     * @return the total size of store files, in MB
+     */
+    @XmlAttribute
+    public int getStorefileSizeMB() {
+      return storefileSizeMB;
+    }
+
+    /**
+     * @return memstore size, in MB
+     */
+    @XmlAttribute
+    public int getMemstoreSizeMB() {
+      return memstoreSizeMB;
+    }
+
+    /**
+     * @return the total size of store file indexes, in MB
+     */
+    @XmlAttribute
+    public int getStorefileIndexSizeMB() {
+      return storefileIndexSizeMB;
+    }
+
     /**
      * @param name the region name
      */
     public void setName(byte[] name) {
       this.name = name;
     }

+    /**
+     * @param stores the number of stores
+     */
+    public void setStores(int stores) {
+      this.stores = stores;
+    }
+
+    /**
+     * @param storefiles the number of store files
+     */
+    public void setStorefiles(int storefiles) {
+      this.storefiles = storefiles;
+    }
+
+    /**
+     * @param storefileSizeMB total size of store files, in MB
+     */
+    public void setStorefileSizeMB(int storefileSizeMB) {
+      this.storefileSizeMB = storefileSizeMB;
+    }
+
+    /**
+     * @param memstoreSizeMB memstore size, in MB
+     */
+    public void setMemstoreSizeMB(int memstoreSizeMB) {
+      this.memstoreSizeMB = memstoreSizeMB;
+    }
+
+    /**
+     * @param storefileIndexSizeMB total size of store file indexes, in MB
+     */
+    public void setStorefileIndexSizeMB(int storefileIndexSizeMB) {
+      this.storefileIndexSizeMB = storefileIndexSizeMB;
+    }
   }

   private String name;
   private long startCode;
   private int requests;
+  private int heapSizeMB;
+  private int maxHeapSizeMB;
   private List<Region> regions = new ArrayList<Region>();

   /**
    * Add a region name to the list
    * @param name the region name
    */
-  public void addRegion(byte[] name) {
-    regions.add(new Region(name));
+  public void addRegion(byte[] name, int stores, int storefiles,
+      int storefileSizeMB, int memstoreSizeMB, int storefileIndexSizeMB) {
+    regions.add(new Region(name, stores, storefiles, storefileSizeMB,
+      memstoreSizeMB, storefileIndexSizeMB));
   }

   /**
@@ -175,6 +285,22 @@ public class StorageClusterStatusModel
       return startCode;
     }

+    /**
+     * @return the current heap size, in MB
+     */
+    @XmlAttribute
+    public int getHeapSizeMB() {
+      return heapSizeMB;
+    }
+
+    /**
+     * @return the maximum heap size, in MB
+     */
+    @XmlAttribute
+    public int getMaxHeapSizeMB() {
+      return maxHeapSizeMB;
+    }
+
     /**
      * @return the list of regions served by the region server
      */
@@ -205,6 +331,20 @@ public class StorageClusterStatusModel
       this.startCode = startCode;
     }

+    /**
+     * @param heapSizeMB the current heap size, in MB
+     */
+    public void setHeapSizeMB(int heapSizeMB) {
+      this.heapSizeMB = heapSizeMB;
+    }
+
+    /**
+     * @param maxHeapSizeMB the maximum heap size, in MB
+     */
+    public void setMaxHeapSizeMB(int maxHeapSizeMB) {
+      this.maxHeapSizeMB = maxHeapSizeMB;
+    }
+
     /**
      * @param regions a list of regions served by the region server
      */
@@ -231,9 +371,14 @@ public class StorageClusterStatusModel
    * Add a live node to the cluster representation.
    * @param name the region server name
    * @param startCode the region server's start code
+   * @param heapSizeMB the current heap size, in MB
+   * @param maxHeapSizeMB the maximum heap size, in MB
    */
-  public Node addLiveNode(String name, long startCode) {
+  public Node addLiveNode(String name, long startCode, int heapSizeMB,
+      int maxHeapSizeMB) {
    Node node = new Node(name, startCode);
+   node.setHeapSizeMB(heapSizeMB);
+   node.setMaxHeapSizeMB(maxHeapSizeMB);
    liveNodes.add(node);
    return node;
  }
@@ -367,10 +512,24 @@ public class StorageClusterStatusModel
       sb.append(node.requests);
       sb.append(", regions=");
       sb.append(node.regions.size());
+      sb.append("\n heapSizeMB=");
+      sb.append(node.heapSizeMB);
+      sb.append("\n maxHeapSizeMB=");
+      sb.append(node.maxHeapSizeMB);
       sb.append("\n\n");
       for (Node.Region region: node.regions) {
         sb.append(" ");
         sb.append(Bytes.toString(region.name));
+        sb.append("\n stores=");
+        sb.append(region.stores);
+        sb.append("\n storefiles=");
+        sb.append(region.storefiles);
+        sb.append("\n storefileSizeMB=");
+        sb.append(region.storefileSizeMB);
+        sb.append("\n memstoreSizeMB=");
+        sb.append(region.memstoreSizeMB);
+        sb.append("\n storefileIndexSizeMB=");
+        sb.append(region.storefileIndexSizeMB);
         sb.append('\n');
       }
       sb.append('\n');
@@ -401,8 +560,18 @@ public class StorageClusterStatusModel
       nodeBuilder.setName(node.name);
       nodeBuilder.setStartCode(node.startCode);
       nodeBuilder.setRequests(node.requests);
+      nodeBuilder.setHeapSizeMB(node.heapSizeMB);
+      nodeBuilder.setMaxHeapSizeMB(node.maxHeapSizeMB);
       for (Node.Region region: node.regions) {
-        nodeBuilder.addRegions(ByteString.copyFrom(region.name));
+        StorageClusterStatus.Region.Builder regionBuilder =
+          StorageClusterStatus.Region.newBuilder();
+        regionBuilder.setName(ByteString.copyFrom(region.name));
+        regionBuilder.setStores(region.stores);
+        regionBuilder.setStorefiles(region.storefiles);
+        regionBuilder.setStorefileSizeMB(region.storefileSizeMB);
+        regionBuilder.setMemstoreSizeMB(region.memstoreSizeMB);
+        regionBuilder.setStorefileIndexSizeMB(region.storefileIndexSizeMB);
+        nodeBuilder.addRegions(regionBuilder);
       }
       builder.addLiveNodes(nodeBuilder);
     }
@@ -429,11 +598,18 @@ public class StorageClusterStatusModel
     for (StorageClusterStatus.Node node: builder.getLiveNodesList()) {
       long startCode = node.hasStartCode() ? node.getStartCode() : -1;
       StorageClusterStatusModel.Node nodeModel =
-        addLiveNode(node.getName(), startCode);
+        addLiveNode(node.getName(), startCode, node.getHeapSizeMB(),
+          node.getMaxHeapSizeMB());
       int requests = node.hasRequests() ? node.getRequests() : 0;
       nodeModel.setRequests(requests);
-      for (ByteString region: node.getRegionsList()) {
-        nodeModel.addRegion(region.toByteArray());
+      for (StorageClusterStatus.Region region: node.getRegionsList()) {
+        nodeModel.addRegion(
+          region.getName().toByteArray(),
+          region.getStores(),
+          region.getStorefiles(),
+          region.getStorefileSizeMB(),
+          region.getMemstoreSizeMB(),
+          region.getStorefileIndexSizeMB());
       }
     }
     for (String node: builder.getDeadNodesList()) {
@@ -19,11 +19,21 @@
 package org.apache.hadoop.hbase.stargate.protobuf.generated;

 message StorageClusterStatus {
+  message Region {
+    required bytes name = 1;
+    optional int32 stores = 2;
+    optional int32 storefiles = 3;
+    optional int32 storefileSizeMB = 4;
+    optional int32 memstoreSizeMB = 5;
+    optional int32 storefileIndexSizeMB = 6;
+  }
   message Node {
     required string name = 1;    // name:port
-    optional int64 startCode = 4;
-    optional int32 requests = 2;
-    repeated bytes regions = 3;
+    optional int64 startCode = 2;
+    optional int32 requests = 3;
+    optional int32 heapSizeMB = 4;
+    optional int32 maxHeapSizeMB = 5;
+    repeated Region regions = 6;
   }
   // node status
   repeated Node liveNodes = 1;
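Note that besides adding the Region message, this hunk renumbers Node's existing tags (startCode 4 to 2, requests 2 to 3) and changes regions from repeated bytes at tag 3 to repeated Region messages at tag 6, so the wire format is not compatible with the previous revision; client and gateway must be upgraded together. A minimal client-side sketch of decoding the new fields with the generated classes below, assuming `bytes` holds a serialized StorageClusterStatus:

  import org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus;

  StorageClusterStatus status = StorageClusterStatus.parseFrom(bytes);
  for (StorageClusterStatus.Node node : status.getLiveNodesList()) {
    // New per-server heap figures.
    System.out.println(node.getName() + " heap " + node.getHeapSizeMB()
        + "/" + node.getMaxHeapSizeMB() + " MB");
    for (StorageClusterStatus.Region region : node.getRegionsList()) {
      // New per-region load figures.
      System.out.println("  " + region.getName().toStringUtf8()
          + ": storefiles=" + region.getStorefiles()
          + " memstoreSizeMB=" + region.getMemstoreSizeMB());
    }
  }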
@@ -32,6 +32,473 @@ public final class StorageClusterStatusMessage {
       return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_fieldAccessorTable;
     }

+    public static final class Region extends
+        com.google.protobuf.GeneratedMessage {
+      // Use Region.newBuilder() to construct.
+      private Region() {}
+
+      private static final Region defaultInstance = new Region();
+      public static Region getDefaultInstance() {
+        return defaultInstance;
+      }
+
+      public Region getDefaultInstanceForType() {
+        return defaultInstance;
+      }
+
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Region_descriptor;
+      }
+
+      @Override
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Region_fieldAccessorTable;
+      }
+
+      // required bytes name = 1;
+      public static final int NAME_FIELD_NUMBER = 1;
+      private boolean hasName;
+      private com.google.protobuf.ByteString name_ = com.google.protobuf.ByteString.EMPTY;
+      public boolean hasName() { return hasName; }
+      public com.google.protobuf.ByteString getName() { return name_; }
+
+      // optional int32 stores = 2;
+      public static final int STORES_FIELD_NUMBER = 2;
+      private boolean hasStores;
+      private int stores_ = 0;
+      public boolean hasStores() { return hasStores; }
+      public int getStores() { return stores_; }
+
+      // optional int32 storefiles = 3;
+      public static final int STOREFILES_FIELD_NUMBER = 3;
+      private boolean hasStorefiles;
+      private int storefiles_ = 0;
+      public boolean hasStorefiles() { return hasStorefiles; }
+      public int getStorefiles() { return storefiles_; }
+
+      // optional int32 storefileSizeMB = 4;
+      public static final int STOREFILESIZEMB_FIELD_NUMBER = 4;
+      private boolean hasStorefileSizeMB;
+      private int storefileSizeMB_ = 0;
+      public boolean hasStorefileSizeMB() { return hasStorefileSizeMB; }
+      public int getStorefileSizeMB() { return storefileSizeMB_; }
+
+      // optional int32 memstoreSizeMB = 5;
+      public static final int MEMSTORESIZEMB_FIELD_NUMBER = 5;
+      private boolean hasMemstoreSizeMB;
+      private int memstoreSizeMB_ = 0;
+      public boolean hasMemstoreSizeMB() { return hasMemstoreSizeMB; }
+      public int getMemstoreSizeMB() { return memstoreSizeMB_; }
+
+      // optional int32 storefileIndexSizeMB = 6;
+      public static final int STOREFILEINDEXSIZEMB_FIELD_NUMBER = 6;
+      private boolean hasStorefileIndexSizeMB;
+      private int storefileIndexSizeMB_ = 0;
+      public boolean hasStorefileIndexSizeMB() { return hasStorefileIndexSizeMB; }
+      public int getStorefileIndexSizeMB() { return storefileIndexSizeMB_; }
+
+      @Override
+      public final boolean isInitialized() {
+        if (!hasName) return false;
+        return true;
+      }
+
+      @Override
+      public void writeTo(com.google.protobuf.CodedOutputStream output)
+          throws java.io.IOException {
+        if (hasName()) {
+          output.writeBytes(1, getName());
+        }
+        if (hasStores()) {
+          output.writeInt32(2, getStores());
+        }
+        if (hasStorefiles()) {
+          output.writeInt32(3, getStorefiles());
+        }
+        if (hasStorefileSizeMB()) {
+          output.writeInt32(4, getStorefileSizeMB());
+        }
+        if (hasMemstoreSizeMB()) {
+          output.writeInt32(5, getMemstoreSizeMB());
+        }
+        if (hasStorefileIndexSizeMB()) {
+          output.writeInt32(6, getStorefileIndexSizeMB());
+        }
+        getUnknownFields().writeTo(output);
+      }
+
+      private int memoizedSerializedSize = -1;
+      @Override
+      public int getSerializedSize() {
+        int size = memoizedSerializedSize;
+        if (size != -1) return size;
+
+        size = 0;
+        if (hasName()) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeBytesSize(1, getName());
+        }
+        if (hasStores()) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeInt32Size(2, getStores());
+        }
+        if (hasStorefiles()) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeInt32Size(3, getStorefiles());
+        }
+        if (hasStorefileSizeMB()) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeInt32Size(4, getStorefileSizeMB());
+        }
+        if (hasMemstoreSizeMB()) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeInt32Size(5, getMemstoreSizeMB());
+        }
+        if (hasStorefileIndexSizeMB()) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeInt32Size(6, getStorefileIndexSizeMB());
+        }
+        size += getUnknownFields().getSerializedSize();
+        memoizedSerializedSize = size;
+        return size;
+      }
+
+      public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
+          com.google.protobuf.ByteString data)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return newBuilder().mergeFrom(data).buildParsed();
+      }
+      public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
+          com.google.protobuf.ByteString data,
+          com.google.protobuf.ExtensionRegistry extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return newBuilder().mergeFrom(data, extensionRegistry)
+                 .buildParsed();
+      }
+      public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(byte[] data)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return newBuilder().mergeFrom(data).buildParsed();
+      }
+      public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
+          byte[] data,
+          com.google.protobuf.ExtensionRegistry extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return newBuilder().mergeFrom(data, extensionRegistry)
+                 .buildParsed();
+      }
+      public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(java.io.InputStream input)
+          throws java.io.IOException {
+        return newBuilder().mergeFrom(input).buildParsed();
+      }
+      public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
+          java.io.InputStream input,
+          com.google.protobuf.ExtensionRegistry extensionRegistry)
+          throws java.io.IOException {
+        return newBuilder().mergeFrom(input, extensionRegistry)
+                 .buildParsed();
+      }
+      public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseDelimitedFrom(java.io.InputStream input)
+          throws java.io.IOException {
+        return newBuilder().mergeDelimitedFrom(input).buildParsed();
+      }
+      public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseDelimitedFrom(
+          java.io.InputStream input,
+          com.google.protobuf.ExtensionRegistry extensionRegistry)
+          throws java.io.IOException {
+        return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
+                 .buildParsed();
+      }
+      public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
+          com.google.protobuf.CodedInputStream input)
+          throws java.io.IOException {
+        return newBuilder().mergeFrom(input).buildParsed();
+      }
+      public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistry extensionRegistry)
+          throws java.io.IOException {
+        return newBuilder().mergeFrom(input, extensionRegistry)
+                 .buildParsed();
+      }
+
+      public static Builder newBuilder() { return new Builder(); }
+      public Builder newBuilderForType() { return new Builder(); }
+      public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region prototype) {
+        return new Builder().mergeFrom(prototype);
+      }
+      public Builder toBuilder() { return newBuilder(this); }
+
+      public static final class Builder extends
+          com.google.protobuf.GeneratedMessage.Builder<Builder> {
+        // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.newBuilder()
+        private Builder() {}
+
+        org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region result = new org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region();
+
+        @Override
+        protected org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region internalGetResult() {
+          return result;
+        }
+
+        @Override
+        public Builder clear() {
+          result = new org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region();
+          return this;
+        }
+
+        @Override
+        public Builder clone() {
+          return new Builder().mergeFrom(result);
+        }
+
+        @Override
+        public com.google.protobuf.Descriptors.Descriptor
+            getDescriptorForType() {
+          return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.getDescriptor();
+        }
+
+        public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getDefaultInstanceForType() {
+          return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.getDefaultInstance();
+        }
+
+        public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region build() {
+          if (result != null && !isInitialized()) {
+            throw new com.google.protobuf.UninitializedMessageException(
+              result);
+          }
+          return buildPartial();
+        }
+
+        private org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region buildParsed()
+            throws com.google.protobuf.InvalidProtocolBufferException {
+          if (!isInitialized()) {
+            throw new com.google.protobuf.UninitializedMessageException(
+              result).asInvalidProtocolBufferException();
+          }
+          return buildPartial();
+        }
+
+        public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region buildPartial() {
+          if (result == null) {
+            throw new IllegalStateException(
+              "build() has already been called on this Builder."); }
+          org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region returnMe = result;
+          result = null;
+          return returnMe;
+        }
+
+        @Override
+        public Builder mergeFrom(com.google.protobuf.Message other) {
+          if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region) {
+            return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region)other);
+          } else {
+            super.mergeFrom(other);
+            return this;
+          }
+        }
+
+        public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region other) {
+          if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.getDefaultInstance()) return this;
+          if (other.hasName()) {
+            setName(other.getName());
+          }
+          if (other.hasStores()) {
+            setStores(other.getStores());
+          }
+          if (other.hasStorefiles()) {
+            setStorefiles(other.getStorefiles());
+          }
+          if (other.hasStorefileSizeMB()) {
+            setStorefileSizeMB(other.getStorefileSizeMB());
+          }
+          if (other.hasMemstoreSizeMB()) {
+            setMemstoreSizeMB(other.getMemstoreSizeMB());
+          }
+          if (other.hasStorefileIndexSizeMB()) {
+            setStorefileIndexSizeMB(other.getStorefileIndexSizeMB());
+          }
+          this.mergeUnknownFields(other.getUnknownFields());
+          return this;
+        }
+
+        @Override
+        public Builder mergeFrom(
+            com.google.protobuf.CodedInputStream input)
+            throws java.io.IOException {
+          return mergeFrom(input,
+            com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
+        }
+
+        @Override
+        public Builder mergeFrom(
+            com.google.protobuf.CodedInputStream input,
+            com.google.protobuf.ExtensionRegistry extensionRegistry)
+            throws java.io.IOException {
+          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+            com.google.protobuf.UnknownFieldSet.newBuilder(
+              this.getUnknownFields());
+          while (true) {
+            int tag = input.readTag();
+            switch (tag) {
+              case 0:
+                this.setUnknownFields(unknownFields.build());
+                return this;
+              default: {
+                if (!parseUnknownField(input, unknownFields,
+                                       extensionRegistry, tag)) {
+                  this.setUnknownFields(unknownFields.build());
+                  return this;
+                }
+                break;
+              }
+              case 10: {
+                setName(input.readBytes());
+                break;
+              }
+              case 16: {
+                setStores(input.readInt32());
+                break;
+              }
+              case 24: {
+                setStorefiles(input.readInt32());
+                break;
+              }
+              case 32: {
+                setStorefileSizeMB(input.readInt32());
+                break;
+              }
+              case 40: {
+                setMemstoreSizeMB(input.readInt32());
+                break;
+              }
+              case 48: {
+                setStorefileIndexSizeMB(input.readInt32());
+                break;
+              }
+            }
+          }
+        }
+
+
+        // required bytes name = 1;
+        public boolean hasName() {
+          return result.hasName();
+        }
+        public com.google.protobuf.ByteString getName() {
+          return result.getName();
+        }
+        public Builder setName(com.google.protobuf.ByteString value) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          result.hasName = true;
+          result.name_ = value;
+          return this;
+        }
+        public Builder clearName() {
+          result.hasName = false;
+          result.name_ = com.google.protobuf.ByteString.EMPTY;
+          return this;
+        }
+
+        // optional int32 stores = 2;
+        public boolean hasStores() {
+          return result.hasStores();
+        }
+        public int getStores() {
+          return result.getStores();
+        }
+        public Builder setStores(int value) {
+          result.hasStores = true;
+          result.stores_ = value;
+          return this;
+        }
+        public Builder clearStores() {
+          result.hasStores = false;
+          result.stores_ = 0;
+          return this;
+        }
+
+        // optional int32 storefiles = 3;
+        public boolean hasStorefiles() {
+          return result.hasStorefiles();
+        }
+        public int getStorefiles() {
+          return result.getStorefiles();
+        }
+        public Builder setStorefiles(int value) {
+          result.hasStorefiles = true;
+          result.storefiles_ = value;
+          return this;
+        }
+        public Builder clearStorefiles() {
+          result.hasStorefiles = false;
+          result.storefiles_ = 0;
+          return this;
+        }
+
+        // optional int32 storefileSizeMB = 4;
+        public boolean hasStorefileSizeMB() {
+          return result.hasStorefileSizeMB();
+        }
+        public int getStorefileSizeMB() {
+          return result.getStorefileSizeMB();
+        }
+        public Builder setStorefileSizeMB(int value) {
+          result.hasStorefileSizeMB = true;
+          result.storefileSizeMB_ = value;
+          return this;
+        }
+        public Builder clearStorefileSizeMB() {
+          result.hasStorefileSizeMB = false;
+          result.storefileSizeMB_ = 0;
+          return this;
+        }
+
+        // optional int32 memstoreSizeMB = 5;
+        public boolean hasMemstoreSizeMB() {
+          return result.hasMemstoreSizeMB();
+        }
+        public int getMemstoreSizeMB() {
+          return result.getMemstoreSizeMB();
+        }
+        public Builder setMemstoreSizeMB(int value) {
+          result.hasMemstoreSizeMB = true;
+          result.memstoreSizeMB_ = value;
+          return this;
+        }
+        public Builder clearMemstoreSizeMB() {
+          result.hasMemstoreSizeMB = false;
+          result.memstoreSizeMB_ = 0;
+          return this;
+        }
+
+        // optional int32 storefileIndexSizeMB = 6;
+        public boolean hasStorefileIndexSizeMB() {
+          return result.hasStorefileIndexSizeMB();
+        }
+        public int getStorefileIndexSizeMB() {
+          return result.getStorefileIndexSizeMB();
+        }
+        public Builder setStorefileIndexSizeMB(int value) {
+          result.hasStorefileIndexSizeMB = true;
+          result.storefileIndexSizeMB_ = value;
+          return this;
+        }
+        public Builder clearStorefileIndexSizeMB() {
+          result.hasStorefileIndexSizeMB = false;
+          result.storefileIndexSizeMB_ = 0;
+          return this;
+        }
+      }
+
+      static {
+        org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.getDescriptor();
+      }
+    }
+
     public static final class Node extends
         com.google.protobuf.GeneratedMessage {
       // Use Node.newBuilder() to construct.
@@ -64,35 +531,52 @@ public final class StorageClusterStatusMessage {
       public boolean hasName() { return hasName; }
       public java.lang.String getName() { return name_; }

-      // optional int64 startCode = 4;
-      public static final int STARTCODE_FIELD_NUMBER = 4;
+      // optional int64 startCode = 2;
+      public static final int STARTCODE_FIELD_NUMBER = 2;
       private boolean hasStartCode;
       private long startCode_ = 0L;
       public boolean hasStartCode() { return hasStartCode; }
       public long getStartCode() { return startCode_; }

-      // optional int32 requests = 2;
-      public static final int REQUESTS_FIELD_NUMBER = 2;
+      // optional int32 requests = 3;
+      public static final int REQUESTS_FIELD_NUMBER = 3;
       private boolean hasRequests;
       private int requests_ = 0;
       public boolean hasRequests() { return hasRequests; }
       public int getRequests() { return requests_; }

-      // repeated bytes regions = 3;
-      public static final int REGIONS_FIELD_NUMBER = 3;
-      private java.util.List<com.google.protobuf.ByteString> regions_ =
+      // optional int32 heapSizeMB = 4;
+      public static final int HEAPSIZEMB_FIELD_NUMBER = 4;
+      private boolean hasHeapSizeMB;
+      private int heapSizeMB_ = 0;
+      public boolean hasHeapSizeMB() { return hasHeapSizeMB; }
+      public int getHeapSizeMB() { return heapSizeMB_; }
+
+      // optional int32 maxHeapSizeMB = 5;
+      public static final int MAXHEAPSIZEMB_FIELD_NUMBER = 5;
+      private boolean hasMaxHeapSizeMB;
+      private int maxHeapSizeMB_ = 0;
+      public boolean hasMaxHeapSizeMB() { return hasMaxHeapSizeMB; }
+      public int getMaxHeapSizeMB() { return maxHeapSizeMB_; }
+
+      // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatus.Region regions = 6;
+      public static final int REGIONS_FIELD_NUMBER = 6;
+      private java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> regions_ =
         java.util.Collections.emptyList();
-      public java.util.List<com.google.protobuf.ByteString> getRegionsList() {
+      public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> getRegionsList() {
         return regions_;
       }
       public int getRegionsCount() { return regions_.size(); }
-      public com.google.protobuf.ByteString getRegions(int index) {
+      public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getRegions(int index) {
         return regions_.get(index);
       }

       @Override
       public final boolean isInitialized() {
         if (!hasName) return false;
+        for (org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region element : getRegionsList()) {
+          if (!element.isInitialized()) return false;
+        }
         return true;
       }

@@ -102,14 +586,20 @@ public final class StorageClusterStatusMessage {
         if (hasName()) {
           output.writeString(1, getName());
         }
-        if (hasRequests()) {
-          output.writeInt32(2, getRequests());
-        }
-        for (com.google.protobuf.ByteString element : getRegionsList()) {
-          output.writeBytes(3, element);
-        }
         if (hasStartCode()) {
-          output.writeInt64(4, getStartCode());
+          output.writeInt64(2, getStartCode());
+        }
+        if (hasRequests()) {
+          output.writeInt32(3, getRequests());
+        }
+        if (hasHeapSizeMB()) {
+          output.writeInt32(4, getHeapSizeMB());
+        }
+        if (hasMaxHeapSizeMB()) {
+          output.writeInt32(5, getMaxHeapSizeMB());
+        }
+        for (org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region element : getRegionsList()) {
+          output.writeMessage(6, element);
         }
         getUnknownFields().writeTo(output);
       }
@@ -125,22 +615,25 @@ public final class StorageClusterStatusMessage {
           size += com.google.protobuf.CodedOutputStream
             .computeStringSize(1, getName());
         }
-        if (hasRequests()) {
-          size += com.google.protobuf.CodedOutputStream
-            .computeInt32Size(2, getRequests());
-        }
-        {
-          int dataSize = 0;
-          for (com.google.protobuf.ByteString element : getRegionsList()) {
-            dataSize += com.google.protobuf.CodedOutputStream
-              .computeBytesSizeNoTag(element);
-          }
-          size += dataSize;
-          size += 1 * getRegionsList().size();
-        }
         if (hasStartCode()) {
           size += com.google.protobuf.CodedOutputStream
-            .computeInt64Size(4, getStartCode());
+            .computeInt64Size(2, getStartCode());
+        }
+        if (hasRequests()) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeInt32Size(3, getRequests());
+        }
+        if (hasHeapSizeMB()) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeInt32Size(4, getHeapSizeMB());
+        }
+        if (hasMaxHeapSizeMB()) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeInt32Size(5, getMaxHeapSizeMB());
+        }
+        for (org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region element : getRegionsList()) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeMessageSize(6, element);
         }
         size += getUnknownFields().getSerializedSize();
         memoizedSerializedSize = size;
@@ -296,9 +789,15 @@ public final class StorageClusterStatusMessage {
         if (other.hasRequests()) {
           setRequests(other.getRequests());
         }
+        if (other.hasHeapSizeMB()) {
+          setHeapSizeMB(other.getHeapSizeMB());
+        }
+        if (other.hasMaxHeapSizeMB()) {
+          setMaxHeapSizeMB(other.getMaxHeapSizeMB());
+        }
         if (!other.regions_.isEmpty()) {
           if (result.regions_.isEmpty()) {
-            result.regions_ = new java.util.ArrayList<com.google.protobuf.ByteString>();
+            result.regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region>();
           }
           result.regions_.addAll(other.regions_);
         }
@@ -341,15 +840,25 @@ public final class StorageClusterStatusMessage {
               break;
             }
             case 16: {
+              setStartCode(input.readInt64());
+              break;
+            }
+            case 24: {
               setRequests(input.readInt32());
               break;
             }
-            case 26: {
-              addRegions(input.readBytes());
+            case 32: {
+              setHeapSizeMB(input.readInt32());
               break;
             }
-            case 32: {
-              setStartCode(input.readInt64());
+            case 40: {
+              setMaxHeapSizeMB(input.readInt32());
+              break;
+            }
+            case 50: {
+              org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder subBuilder = org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.newBuilder();
+              input.readMessage(subBuilder, extensionRegistry);
+              addRegions(subBuilder.buildPartial());
               break;
             }
           }
@@ -378,7 +887,7 @@ public final class StorageClusterStatusMessage {
         return this;
       }

-      // optional int64 startCode = 4;
+      // optional int64 startCode = 2;
       public boolean hasStartCode() {
         return result.hasStartCode();
       }
@@ -396,7 +905,7 @@ public final class StorageClusterStatusMessage {
         return this;
       }

-      // optional int32 requests = 2;
+      // optional int32 requests = 3;
       public boolean hasRequests() {
         return result.hasRequests();
       }
@@ -414,37 +923,84 @@ public final class StorageClusterStatusMessage {
         return this;
       }

-      // repeated bytes regions = 3;
-      public java.util.List<com.google.protobuf.ByteString> getRegionsList() {
+      // optional int32 heapSizeMB = 4;
+      public boolean hasHeapSizeMB() {
+        return result.hasHeapSizeMB();
+      }
+      public int getHeapSizeMB() {
+        return result.getHeapSizeMB();
+      }
+      public Builder setHeapSizeMB(int value) {
+        result.hasHeapSizeMB = true;
+        result.heapSizeMB_ = value;
+        return this;
+      }
+      public Builder clearHeapSizeMB() {
+        result.hasHeapSizeMB = false;
+        result.heapSizeMB_ = 0;
+        return this;
+      }
+
+      // optional int32 maxHeapSizeMB = 5;
+      public boolean hasMaxHeapSizeMB() {
+        return result.hasMaxHeapSizeMB();
+      }
+      public int getMaxHeapSizeMB() {
+        return result.getMaxHeapSizeMB();
+      }
+      public Builder setMaxHeapSizeMB(int value) {
+        result.hasMaxHeapSizeMB = true;
+        result.maxHeapSizeMB_ = value;
+        return this;
+      }
+      public Builder clearMaxHeapSizeMB() {
+        result.hasMaxHeapSizeMB = false;
+        result.maxHeapSizeMB_ = 0;
+        return this;
+      }
+
+      // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatus.Region regions = 6;
+      public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> getRegionsList() {
         return java.util.Collections.unmodifiableList(result.regions_);
       }
       public int getRegionsCount() {
         return result.getRegionsCount();
       }
-      public com.google.protobuf.ByteString getRegions(int index) {
+      public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getRegions(int index) {
         return result.getRegions(index);
       }
-      public Builder setRegions(int index, com.google.protobuf.ByteString value) {
+      public Builder setRegions(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region value) {
         if (value == null) {
           throw new NullPointerException();
         }
         result.regions_.set(index, value);
         return this;
       }
-      public Builder addRegions(com.google.protobuf.ByteString value) {
+      public Builder setRegions(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder builderForValue) {
+        result.regions_.set(index, builderForValue.build());
+        return this;
+      }
+      public Builder addRegions(org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region value) {
         if (value == null) {
           throw new NullPointerException();
         }
         if (result.regions_.isEmpty()) {
-          result.regions_ = new java.util.ArrayList<com.google.protobuf.ByteString>();
+          result.regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region>();
         }
         result.regions_.add(value);
         return this;
       }
-      public Builder addAllRegions(
-        java.lang.Iterable<? extends com.google.protobuf.ByteString> values) {
+      public Builder addRegions(org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder builderForValue) {
+        if (result.regions_.isEmpty()) {
+          result.regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region>();
+        }
+        result.regions_.add(builderForValue.build());
+        return this;
+      }
+      public Builder addAllRegions(
+          java.lang.Iterable<? extends org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> values) {
         if (result.regions_.isEmpty()) {
-          result.regions_ = new java.util.ArrayList<com.google.protobuf.ByteString>();
+          result.regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region>();
         }
         super.addAll(values, result.regions_);
         return this;
|
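The regenerated Node builder above gains heapSizeMB (field 4) and maxHeapSizeMB (field 5) accessors, and its repeated regions field (6) now carries full Region messages instead of raw bytes. A minimal sketch of populating the new message follows; Node.newBuilder() and the Region setters (setName, setStores, and so on) are the usual protobuf-generated accessors assumed here, not methods shown in this hunk, and the values mirror the test fixture later in this commit:

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus;

    public class NodeStatusSketch {
      // Builds a Node carrying the new heap and per-region load fields.
      public static StorageClusterStatus.Node buildNode() {
        return StorageClusterStatus.Node.newBuilder()
            .setName("test1")             // required string name = 1
            .setStartCode(1245219839331L) // optional int64 startCode = 2
            .setHeapSizeMB(128)           // new: optional int32 heapSizeMB = 4
            .setMaxHeapSizeMB(1024)       // new: optional int32 maxHeapSizeMB = 5
            // new: repeated Region regions = 6, a message rather than raw bytes
            .addRegions(StorageClusterStatus.Region.newBuilder()
                .setName(ByteString.copyFromUtf8("-ROOT-,,0"))
                .setStores(1)
                .setStorefiles(1)
                .setStorefileSizeMB(0)
                .setMemstoreSizeMB(0)
                .setStorefileIndexSizeMB(0))
            .build();
      }
    }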
@@ -953,6 +1509,11 @@ public final class StorageClusterStatusMessage {
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Region_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Region_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
     internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Node_descriptor;
   private static
@@ -969,14 +1530,20 @@ public final class StorageClusterStatusMessage {
     java.lang.String descriptorData =
       "\n!StorageClusterStatusMessage.proto\0223org" +
       ".apache.hadoop.hbase.stargate.protobuf.g" +
-      "enerated\"\220\002\n\024StorageClusterStatus\022a\n\tliv" +
+      "enerated\"\232\004\n\024StorageClusterStatus\022a\n\tliv" +
       "eNodes\030\001 \003(\0132N.org.apache.hadoop.hbase.s" +
       "targate.protobuf.generated.StorageCluste" +
       "rStatus.Node\022\021\n\tdeadNodes\030\002 \003(\t\022\017\n\007regio" +
       "ns\030\003 \001(\005\022\020\n\010requests\030\004 \001(\005\022\023\n\013averageLoa" +
-      "d\030\005 \001(\001\032J\n\004Node\022\014\n\004name\030\001 \002(\t\022\021\n\tstartCo" +
-      "de\030\004 \001(\003\022\020\n\010requests\030\002 \001(\005\022\017\n\007regions\030\003 " +
-      "\003(\014";
+      "d\030\005 \001(\001\032\211\001\n\006Region\022\014\n\004name\030\001 \002(\014\022\016\n\006stor" +
+      "es\030\002 \001(\005\022\022\n\nstorefiles\030\003 \001(\005\022\027\n\017storefil" +
+      "eSizeMB\030\004 \001(\005\022\026\n\016memstoreSizeMB\030\005 \001(\005\022\034\n" +
+      "\024storefileIndexSizeMB\030\006 \001(\005\032\307\001\n\004Node\022\014\n\004" +
+      "name\030\001 \002(\t\022\021\n\tstartCode\030\002 \001(\003\022\020\n\010request" +
+      "s\030\003 \001(\005\022\022\n\nheapSizeMB\030\004 \001(\005\022\025\n\rmaxHeapSi" +
+      "zeMB\030\005 \001(\005\022a\n\007regions\030\006 \003(\0132P.org.apache" +
+      ".hadoop.hbase.stargate.protobuf.generate" +
+      "d.StorageClusterStatus.Region";
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
         public com.google.protobuf.ExtensionRegistry assignDescriptors(
@@ -990,12 +1557,20 @@ public final class StorageClusterStatusMessage {
           new java.lang.String[] { "LiveNodes", "DeadNodes", "Regions", "Requests", "AverageLoad", },
           org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.class,
           org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Builder.class);
-      internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Node_descriptor =
+      internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Region_descriptor =
         internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_descriptor.getNestedTypes().get(0);
+      internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Region_fieldAccessorTable = new
+        com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+          internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Region_descriptor,
+          new java.lang.String[] { "Name", "Stores", "Storefiles", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", },
+          org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.class,
+          org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder.class);
+      internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Node_descriptor =
+        internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_descriptor.getNestedTypes().get(1);
       internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Node_fieldAccessorTable = new
         com.google.protobuf.GeneratedMessage.FieldAccessorTable(
           internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Node_descriptor,
-          new java.lang.String[] { "Name", "StartCode", "Requests", "Regions", },
+          new java.lang.String[] { "Name", "StartCode", "Requests", "HeapSizeMB", "MaxHeapSizeMB", "Regions", },
          org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.class,
           org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder.class);
       return null;
@@ -37,20 +37,23 @@ public class TestStorageClusterStatusModel extends TestCase {

   private static final String AS_XML =
     "<ClusterStatus requests=\"0\" regions=\"2\" averageLoad=\"1.0\">" +
-    "<LiveNodes>" +
-    "<Node startCode=\"1245219839331\" requests=\"0\" name=\"test1\">" +
-    "<Region name=\"LVJPT1QtLCww\"/>" +
-    "</Node>" +
-    "<Node startCode=\"1245239331198\" requests=\"0\" name=\"test2\">" +
-    "<Region name=\"Lk1FVEEuLCwxMjQ2MDAwMDQzNzI0\"/>" +
-    "</Node>" +
-    "</LiveNodes>" +
     "<DeadNodes/>" +
-    "</ClusterStatus>";
+    "<LiveNodes><Node startCode=\"1245219839331\" requests=\"0\"" +
+    " name=\"test1\" maxHeapSizeMB=\"1024\" heapSizeMB=\"128\">" +
+    "<Region stores=\"1\" storefiles=\"1\" storefileSizeMB=\"0\"" +
+    " storefileIndexSizeMB=\"0\" name=\"LVJPT1QtLCww\"" +
+    " memstoreSizeMB=\"0\"/></Node>" +
+    "<Node startCode=\"1245239331198\" requests=\"0\" name=\"test2\"" +
+    " maxHeapSizeMB=\"1024\" heapSizeMB=\"512\">" +
+    "<Region stores=\"1\" storefiles=\"1\" storefileSizeMB=\"0\"" +
+    " storefileIndexSizeMB=\"0\" name=\"Lk1FVEEuLCwxMjQ2MDAwMDQzNzI0\"" +
+    " memstoreSizeMB=\"0\"/></Node>" +
+    "</LiveNodes></ClusterStatus>";

   private static final String AS_PB =
-    "ChsKBXRlc3QxEAAaCS1ST09ULSwsMCDjuovnniQKJwoFdGVzdDIQABoVLk1FVEEuLCwxMjQ2MDAw" +
-    "MDQzNzI0IP6SsfCeJBgCIAApAAAAAAAA8D8=";
+    "Ci0KBXRlc3QxEOO6i+eeJBgAIIABKIAIMhUKCS1ST09ULSwsMBABGAEgACgAMAAKOQoFdGVzdDIQ" +
+    "/pKx8J4kGAAggAQogAgyIQoVLk1FVEEuLCwxMjQ2MDAwMDQzNzI0EAEYASAAKAAwABgCIAApAAAA" +
+    "AAAA8D8=";

   private JAXBContext context;
@@ -64,10 +67,10 @@ public class TestStorageClusterStatusModel extends TestCase {
     model.setRegions(2);
     model.setRequests(0);
     model.setAverageLoad(1.0);
-    model.addLiveNode("test1", 1245219839331L)
-      .addRegion(Bytes.toBytes("-ROOT-,,0"));
-    model.addLiveNode("test2", 1245239331198L)
-      .addRegion(Bytes.toBytes(".META.,,1246000043724"));
+    model.addLiveNode("test1", 1245219839331L, 128, 1024)
+      .addRegion(Bytes.toBytes("-ROOT-,,0"), 1, 1, 0, 0, 0);
+    model.addLiveNode("test2", 1245239331198L, 512, 1024)
+      .addRegion(Bytes.toBytes(".META.,,1246000043724"), 1, 1, 0, 0, 0);
     return model;
   }
@@ -102,17 +105,31 @@ public class TestStorageClusterStatusModel extends TestCase {
     StorageClusterStatusModel.Node node = nodes.next();
     assertEquals(node.getName(), "test1");
     assertEquals(node.getStartCode(), 1245219839331L);
+    assertEquals(node.getHeapSizeMB(), 128);
+    assertEquals(node.getMaxHeapSizeMB(), 1024);
     Iterator<StorageClusterStatusModel.Node.Region> regions =
       node.getRegions().iterator();
     StorageClusterStatusModel.Node.Region region = regions.next();
     assertTrue(Bytes.toString(region.getName()).equals("-ROOT-,,0"));
+    assertEquals(region.getStores(), 1);
+    assertEquals(region.getStorefiles(), 1);
+    assertEquals(region.getStorefileSizeMB(), 0);
+    assertEquals(region.getMemstoreSizeMB(), 0);
+    assertEquals(region.getStorefileIndexSizeMB(), 0);
     assertFalse(regions.hasNext());
     node = nodes.next();
     assertEquals(node.getName(), "test2");
     assertEquals(node.getStartCode(), 1245239331198L);
+    assertEquals(node.getHeapSizeMB(), 512);
+    assertEquals(node.getMaxHeapSizeMB(), 1024);
     regions = node.getRegions().iterator();
     region = regions.next();
     assertEquals(Bytes.toString(region.getName()), ".META.,,1246000043724");
+    assertEquals(region.getStores(), 1);
+    assertEquals(region.getStorefiles(), 1);
+    assertEquals(region.getStorefileSizeMB(), 0);
+    assertEquals(region.getMemstoreSizeMB(), 0);
+    assertEquals(region.getStorefileIndexSizeMB(), 0);
     assertFalse(regions.hasNext());
     assertFalse(nodes.hasNext());
   }
@@ -59,6 +59,8 @@ public class HServerLoad implements WritableComparable<HServerLoad> {
     private int stores;
     /** the number of storefiles for the region */
     private int storefiles;
+    /** the current total size of the store files for the region, in MB */
+    private int storefileSizeMB;
     /** the current size of the memstore for the region, in MB */
     private int memstoreSizeMB;
     /** the current total size of storefile indexes for the region, in MB */
|
||||||
* @param name
|
* @param name
|
||||||
* @param stores
|
* @param stores
|
||||||
* @param storefiles
|
* @param storefiles
|
||||||
|
* @param storefileSizeMB
|
||||||
* @param memstoreSizeMB
|
* @param memstoreSizeMB
|
||||||
* @param storefileIndexSizeMB
|
* @param storefileIndexSizeMB
|
||||||
*/
|
*/
|
||||||
public RegionLoad(final byte[] name, final int stores,
|
public RegionLoad(final byte[] name, final int stores,
|
||||||
final int storefiles, final int memstoreSizeMB,
|
final int storefiles, final int storefileSizeMB,
|
||||||
final int storefileIndexSizeMB) {
|
final int memstoreSizeMB, final int storefileIndexSizeMB) {
|
||||||
this.name = name;
|
this.name = name;
|
||||||
this.stores = stores;
|
this.stores = stores;
|
||||||
this.storefiles = storefiles;
|
this.storefiles = storefiles;
|
||||||
|
this.storefileSizeMB = storefileSizeMB;
|
||||||
this.memstoreSizeMB = memstoreSizeMB;
|
this.memstoreSizeMB = memstoreSizeMB;
|
||||||
this.storefileIndexSizeMB = storefileIndexSizeMB;
|
this.storefileIndexSizeMB = storefileIndexSizeMB;
|
||||||
}
|
}
|
||||||
|
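The constructor change above is positional: all of the size counters are plain ints, so a stale caller still compiles while silently shifting every argument after storefiles, and each call site must be updated by hand. A small usage sketch with the post-commit ordering (values borrowed from the test fixture):

    import org.apache.hadoop.hbase.HServerLoad;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionLoadSketch {
      public static HServerLoad.RegionLoad sample() {
        // Post-commit ordering: name, stores, storefiles,
        // storefileSizeMB (new), memstoreSizeMB, storefileIndexSizeMB.
        return new HServerLoad.RegionLoad(
            Bytes.toBytes("-ROOT-,,0"), // region name
            1,   // stores
            1,   // storefiles
            0,   // storefileSizeMB, the field added by this commit
            0,   // memstoreSizeMB
            0);  // storefileIndexSizeMB
      }
    }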
@@ -118,6 +122,13 @@ public class HServerLoad implements WritableComparable<HServerLoad> {
       return storefiles;
     }

+    /**
+     * @return the total size of the storefiles, in MB
+     */
+    public int getStorefileSizeMB() {
+      return storefileSizeMB;
+    }
+
     /**
      * @return the memstore size, in MB
      */
@@ -177,6 +188,7 @@ public class HServerLoad implements WritableComparable<HServerLoad> {
       in.readFully(this.name);
       this.stores = in.readInt();
       this.storefiles = in.readInt();
+      this.storefileSizeMB = in.readInt();
       this.memstoreSizeMB = in.readInt();
       this.storefileIndexSizeMB = in.readInt();
     }
@@ -186,6 +198,7 @@ public class HServerLoad implements WritableComparable<HServerLoad> {
       out.write(name);
       out.writeInt(stores);
       out.writeInt(storefiles);
+      out.writeInt(storefileSizeMB);
       out.writeInt(memstoreSizeMB);
       out.writeInt(storefileIndexSizeMB);
     }
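readFields() and write() add the new int in lock-step, fourth in both directions, so the Writable wire format stays self-consistent; older peers cannot parse it, which is part of why the RPC protocol version is bumped further down. A hedged round-trip sketch using the standard Hadoop buffer helpers, assuming RegionLoad keeps the usual no-arg Writable constructor:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HServerLoad;
    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;

    public class RegionLoadWireSketch {
      public static HServerLoad.RegionLoad roundTrip(HServerLoad.RegionLoad load)
          throws IOException {
        DataOutputBuffer out = new DataOutputBuffer();
        load.write(out);                     // storefileSizeMB written fourth
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        // Assumes the usual no-arg constructor that Writable deserialization needs.
        HServerLoad.RegionLoad copy = new HServerLoad.RegionLoad();
        copy.readFields(in);                 // storefileSizeMB read back fourth
        return copy;
      }
    }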
@@ -199,9 +212,11 @@ public class HServerLoad implements WritableComparable<HServerLoad> {
         Integer.valueOf(this.stores));
       sb = Strings.appendKeyValue(sb, "storefiles",
         Integer.valueOf(this.storefiles));
-      sb = Strings.appendKeyValue(sb, "memstoreSize",
+      sb = Strings.appendKeyValue(sb, "storefileSizeMB",
+        Integer.valueOf(this.storefileSizeMB));
+      sb = Strings.appendKeyValue(sb, "memstoreSizeMB",
         Integer.valueOf(this.memstoreSizeMB));
-      sb = Strings.appendKeyValue(sb, "storefileIndexSize",
+      sb = Strings.appendKeyValue(sb, "storefileIndexSizeMB",
         Integer.valueOf(this.storefileIndexSizeMB));
       return sb.toString();
     }
@@ -332,6 +347,20 @@ public class HServerLoad implements WritableComparable<HServerLoad> {
     return numberOfRequests;
   }

+  /**
+   * @return the amount of heap in use, in MB
+   */
+  public int getUsedHeapMB() {
+    return usedHeapMB;
+  }
+
+  /**
+   * @return the maximum allowable heap size, in MB
+   */
+  public int getMaxHeapMB() {
+    return maxHeapMB;
+  }
+
   /**
    * @return region load metrics
    */
@@ -349,6 +378,16 @@ public class HServerLoad implements WritableComparable<HServerLoad> {
     return count;
   }

+  /**
+   * @return Total size of store files in MB
+   */
+  public int getStorefileSizeInMB() {
+    int count = 0;
+    for (RegionLoad info: regionLoad)
+      count += info.getStorefileSizeMB();
+    return count;
+  }
+
   /**
    * @return Size of memstores in MB
    */
@@ -417,10 +456,10 @@ public class HServerLoad implements WritableComparable<HServerLoad> {
    */
   @Deprecated
   public void addRegionInfo(final byte[] name, final int stores,
-      final int storefiles, final int memstoreSizeMB,
-      final int storefileIndexSizeMB) {
+      final int storefiles, final int storefileSizeMB,
+      final int memstoreSizeMB, final int storefileIndexSizeMB) {
     this.regionLoad.add(new HServerLoad.RegionLoad(name, stores, storefiles,
-      memstoreSizeMB, storefileIndexSizeMB));
+      storefileSizeMB, memstoreSizeMB, storefileIndexSizeMB));
   }

   // Writable
@@ -72,7 +72,8 @@ public interface HBaseRPCProtocolVersion extends VersionedProtocol {
    * <li>Version 18: HBASE-1302.</li>
    * <li>Version 19: Added getClusterStatus().</li>
    * <li>Version 20: Backed Transaction HBase out of HBase core.</li>
+   * <li>Version 21: HBASE-1665.</li>
    * </ul>
    */
-  public static final long versionID = 20L;
+  public static final long versionID = 21L;
 }
@@ -759,18 +759,21 @@ public class HRegionServer implements HConstants, HRegionInterface,
     byte[] name = r.getRegionName();
     int stores = 0;
     int storefiles = 0;
+    int storefileSizeMB = 0;
     int memstoreSizeMB = (int)(r.memstoreSize.get()/1024/1024);
     int storefileIndexSizeMB = 0;
     synchronized (r.stores) {
       stores += r.stores.size();
       for (Store store: r.stores.values()) {
         storefiles += store.getStorefilesCount();
+        storefileSizeMB +=
+          (int)(store.getStorefilesSize()/1024/1024);
         storefileIndexSizeMB +=
           (int)(store.getStorefilesIndexSize()/1024/1024);
       }
     }
-    return new HServerLoad.RegionLoad(name, stores, storefiles, memstoreSizeMB,
-      storefileIndexSizeMB);
+    return new HServerLoad.RegionLoad(name, stores, storefiles,
+      storefileSizeMB, memstoreSizeMB, storefileIndexSizeMB);
   }

   /**
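Both size figures use the same truncating integer division, so any store or memstore smaller than 1 MB reports zero, which is why the test fixture earlier in this commit expects storefileSizeMB="0" and memstoreSizeMB="0". A one-line illustration:

    public class SizeInMBSketch {
      public static void main(String[] args) {
        long storefileBytes = 512L * 1024L;                // a 512 KB store file
        int storefileSizeMB = (int)(storefileBytes / 1024 / 1024);
        System.out.println(storefileSizeMB);               // prints 0: sub-MB sizes truncate
      }
    }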
@@ -1405,6 +1405,22 @@ public class Store implements HConstants, HeapSize {
     return this.storefiles.size();
   }

+  /**
+   * @return The size of the store files, in bytes.
+   */
+  long getStorefilesSize() {
+    long size = 0;
+    for (StoreFile s: storefiles.values()) {
+      Reader r = s.getReader();
+      if (r == null) {
+        LOG.warn("StoreFile " + s + " has a null Reader");
+        continue;
+      }
+      size += r.length();
+    }
+    return size;
+  }
+
   /**
    * @return The size of the store file indexes, in bytes.
    */