HBASE-22903 : Table to RegionStatesCount metrics - Use for broken alter_status command (#611)

Signed-off-by: huzheng <openinx@gmail.com>
This commit is contained in:
Viraj Jasani 2019-09-30 15:05:22 +05:30 committed by huzheng
parent 51184980a9
commit 15ec3c458c
11 changed files with 418 additions and 10 deletions

View File

@ -23,6 +23,7 @@ import edu.umd.cs.findbugs.annotations.Nullable;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionStatesCount;
import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.RegionState;
import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceAudience;
@ -152,6 +153,14 @@ public interface ClusterMetrics {
return (double)getRegionCount() / (double)serverSize; return (double)getRegionCount() / (double)serverSize;
} }
/**
 * Provide the region states count for the given table,
 * e.g. how many regions of the given table are opened/closed/in transition etc.
 *
 * @return map of table name to region states count
 */
Map<TableName, RegionStatesCount> getTableRegionStatesCount();
/** /**
* Kinds of ClusterMetrics * Kinds of ClusterMetrics
*/ */
@ -199,6 +208,10 @@ public interface ClusterMetrics {
/** /**
* metrics about live region servers name * metrics about live region servers name
*/ */
SERVERS_NAME SERVERS_NAME,
/**
* metrics about table to no of regions status count
*/
TABLE_TO_REGIONS_COUNT,
} }
} }

View File

@ -26,6 +26,8 @@ import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.TreeMap; import java.util.TreeMap;
import java.util.stream.Collectors; import java.util.stream.Collectors;
import org.apache.hadoop.hbase.client.RegionStatesCount;
import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.RegionState;
import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceAudience;
@ -70,6 +72,13 @@ public final class ClusterMetricsBuilder {
.collect(Collectors.toList())) .collect(Collectors.toList()))
.setMasterInfoPort(metrics.getMasterInfoPort()) .setMasterInfoPort(metrics.getMasterInfoPort())
.addAllServersName(metrics.getServersName().stream().map(ProtobufUtil::toServerName) .addAllServersName(metrics.getServersName().stream().map(ProtobufUtil::toServerName)
.collect(Collectors.toList()))
.addAllTableRegionStatesCount(metrics.getTableRegionStatesCount().entrySet().stream()
.map(status ->
ClusterStatusProtos.TableRegionStatesCount.newBuilder()
.setTableName(ProtobufUtil.toProtoTableName((status.getKey())))
.setRegionStatesCount(ProtobufUtil.toTableRegionStatesCount(status.getValue()))
.build())
.collect(Collectors.toList())); .collect(Collectors.toList()));
if (metrics.getMasterName() != null) { if (metrics.getMasterName() != null) {
builder.setMaster(ProtobufUtil.toServerName((metrics.getMasterName()))); builder.setMaster(ProtobufUtil.toServerName((metrics.getMasterName())));
@ -108,7 +117,12 @@ public final class ClusterMetricsBuilder {
.map(HBaseProtos.Coprocessor::getName) .map(HBaseProtos.Coprocessor::getName)
.collect(Collectors.toList())) .collect(Collectors.toList()))
.setServerNames(proto.getServersNameList().stream().map(ProtobufUtil::toServerName) .setServerNames(proto.getServersNameList().stream().map(ProtobufUtil::toServerName)
.collect(Collectors.toList())); .collect(Collectors.toList()))
.setTableRegionStatesCount(
proto.getTableRegionStatesCountList().stream()
.collect(Collectors.toMap(
e -> ProtobufUtil.toTableName(e.getTableName()),
e -> ProtobufUtil.toTableRegionStatesCount(e.getRegionStatesCount()))));
if (proto.hasClusterId()) { if (proto.hasClusterId()) {
builder.setClusterId(ClusterId.convert(proto.getClusterId()).toString()); builder.setClusterId(ClusterId.convert(proto.getClusterId()).toString());
} }
@ -149,6 +163,7 @@ public final class ClusterMetricsBuilder {
case BALANCER_ON: return ClusterMetrics.Option.BALANCER_ON; case BALANCER_ON: return ClusterMetrics.Option.BALANCER_ON;
case SERVERS_NAME: return ClusterMetrics.Option.SERVERS_NAME; case SERVERS_NAME: return ClusterMetrics.Option.SERVERS_NAME;
case MASTER_INFO_PORT: return ClusterMetrics.Option.MASTER_INFO_PORT; case MASTER_INFO_PORT: return ClusterMetrics.Option.MASTER_INFO_PORT;
case TABLE_TO_REGIONS_COUNT: return ClusterMetrics.Option.TABLE_TO_REGIONS_COUNT;
// should not reach here // should not reach here
default: throw new IllegalArgumentException("Invalid option: " + option); default: throw new IllegalArgumentException("Invalid option: " + option);
} }
@ -172,6 +187,7 @@ public final class ClusterMetricsBuilder {
case BALANCER_ON: return ClusterStatusProtos.Option.BALANCER_ON; case BALANCER_ON: return ClusterStatusProtos.Option.BALANCER_ON;
case SERVERS_NAME: return Option.SERVERS_NAME; case SERVERS_NAME: return Option.SERVERS_NAME;
case MASTER_INFO_PORT: return ClusterStatusProtos.Option.MASTER_INFO_PORT; case MASTER_INFO_PORT: return ClusterStatusProtos.Option.MASTER_INFO_PORT;
case TABLE_TO_REGIONS_COUNT: return ClusterStatusProtos.Option.TABLE_TO_REGIONS_COUNT;
// should not reach here // should not reach here
default: throw new IllegalArgumentException("Invalid option: " + option); default: throw new IllegalArgumentException("Invalid option: " + option);
} }
@ -214,6 +230,7 @@ public final class ClusterMetricsBuilder {
private Boolean balancerOn; private Boolean balancerOn;
private int masterInfoPort; private int masterInfoPort;
private List<ServerName> serversName = Collections.emptyList(); private List<ServerName> serversName = Collections.emptyList();
private Map<TableName, RegionStatesCount> tableRegionStatesCount = Collections.emptyMap();
private ClusterMetricsBuilder() { private ClusterMetricsBuilder() {
} }
@ -263,6 +280,13 @@ public final class ClusterMetricsBuilder {
this.serversName = serversName; this.serversName = serversName;
return this; return this;
} }
/**
 * Set the map of table name to {@link RegionStatesCount} that the built
 * {@link ClusterMetrics} will report from its getTableRegionStatesCount().
 *
 * @param tableRegionStatesCount map of table name to region states count
 * @return this builder, for call chaining
 */
public ClusterMetricsBuilder setTableRegionStatesCount(
Map<TableName, RegionStatesCount> tableRegionStatesCount) {
this.tableRegionStatesCount = tableRegionStatesCount;
return this;
}
public ClusterMetrics build() { public ClusterMetrics build() {
return new ClusterMetricsImpl( return new ClusterMetricsImpl(
hbaseVersion, hbaseVersion,
@ -275,7 +299,9 @@ public final class ClusterMetricsBuilder {
masterCoprocessorNames, masterCoprocessorNames,
balancerOn, balancerOn,
masterInfoPort, masterInfoPort,
serversName); serversName,
tableRegionStatesCount
);
} }
private static class ClusterMetricsImpl implements ClusterMetrics { private static class ClusterMetricsImpl implements ClusterMetrics {
@Nullable @Nullable
@ -293,6 +319,7 @@ public final class ClusterMetricsBuilder {
private final Boolean balancerOn; private final Boolean balancerOn;
private final int masterInfoPort; private final int masterInfoPort;
private final List<ServerName> serversName; private final List<ServerName> serversName;
private final Map<TableName, RegionStatesCount> tableRegionStatesCount;
ClusterMetricsImpl(String hbaseVersion, List<ServerName> deadServerNames, ClusterMetricsImpl(String hbaseVersion, List<ServerName> deadServerNames,
Map<ServerName, ServerMetrics> liveServerMetrics, Map<ServerName, ServerMetrics> liveServerMetrics,
@ -303,7 +330,8 @@ public final class ClusterMetricsBuilder {
List<String> masterCoprocessorNames, List<String> masterCoprocessorNames,
Boolean balancerOn, Boolean balancerOn,
int masterInfoPort, int masterInfoPort,
List<ServerName> serversName) { List<ServerName> serversName,
Map<TableName, RegionStatesCount> tableRegionStatesCount) {
this.hbaseVersion = hbaseVersion; this.hbaseVersion = hbaseVersion;
this.deadServerNames = Preconditions.checkNotNull(deadServerNames); this.deadServerNames = Preconditions.checkNotNull(deadServerNames);
this.liveServerMetrics = Preconditions.checkNotNull(liveServerMetrics); this.liveServerMetrics = Preconditions.checkNotNull(liveServerMetrics);
@ -315,6 +343,7 @@ public final class ClusterMetricsBuilder {
this.balancerOn = balancerOn; this.balancerOn = balancerOn;
this.masterInfoPort = masterInfoPort; this.masterInfoPort = masterInfoPort;
this.serversName = serversName; this.serversName = serversName;
this.tableRegionStatesCount = Preconditions.checkNotNull(tableRegionStatesCount);
} }
@Override @Override
@ -372,6 +401,11 @@ public final class ClusterMetricsBuilder {
return Collections.unmodifiableList(serversName); return Collections.unmodifiableList(serversName);
} }
@Override
public Map<TableName, RegionStatesCount> getTableRegionStatesCount() {
// Wrap in an unmodifiable view so callers cannot mutate the metrics snapshot.
return Collections.unmodifiableMap(tableRegionStatesCount);
}
@Override @Override
public String toString() { public String toString() {
StringBuilder sb = new StringBuilder(1024); StringBuilder sb = new StringBuilder(1024);

View File

@ -26,6 +26,8 @@ import java.util.Collection;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.stream.Collectors; import java.util.stream.Collectors;
import org.apache.hadoop.hbase.client.RegionStatesCount;
import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.RegionState;
import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceAudience;
@ -349,6 +351,11 @@ public class ClusterStatus implements ClusterMetrics {
return metrics.getServersName(); return metrics.getServersName();
} }
@Override
public Map<TableName, RegionStatesCount> getTableRegionStatesCount() {
return metrics.getTableRegionStatesCount();
}
@Override @Override
public String toString() { public String toString() {
StringBuilder sb = new StringBuilder(1024); StringBuilder sb = new StringBuilder(1024);

View File

@ -0,0 +1,167 @@
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import org.apache.yetus.audience.InterfaceAudience;
@InterfaceAudience.Private
public final class RegionStatesCount {

  // Immutable snapshot of per-table region state counts. All fields are fixed
  // at construction time; build instances via RegionStatesCountBuilder.
  private final int openRegions;
  private final int splitRegions;
  private final int closedRegions;
  private final int regionsInTransition;
  private final int totalRegions;

  private RegionStatesCount(int openRegions, int splitRegions, int closedRegions,
      int regionsInTransition, int totalRegions) {
    this.openRegions = openRegions;
    this.splitRegions = splitRegions;
    this.closedRegions = closedRegions;
    this.regionsInTransition = regionsInTransition;
    this.totalRegions = totalRegions;
  }

  /** @return number of regions counted as closed */
  public int getClosedRegions() {
    return closedRegions;
  }

  /** @return number of regions counted as open */
  public int getOpenRegions() {
    return openRegions;
  }

  /** @return number of regions counted as split */
  public int getSplitRegions() {
    return splitRegions;
  }

  /** @return number of regions counted as in transition */
  public int getRegionsInTransition() {
    return regionsInTransition;
  }

  /** @return total number of regions of the table */
  public int getTotalRegions() {
    return totalRegions;
  }

  /**
   * Fluent builder for {@link RegionStatesCount}. Any count not explicitly set
   * defaults to 0.
   */
  public static class RegionStatesCountBuilder {
    private int openRegions;
    private int splitRegions;
    private int closedRegions;
    private int regionsInTransition;
    private int totalRegions;

    public RegionStatesCountBuilder setOpenRegions(int openRegions) {
      this.openRegions = openRegions;
      return this;
    }

    public RegionStatesCountBuilder setSplitRegions(int splitRegions) {
      this.splitRegions = splitRegions;
      return this;
    }

    public RegionStatesCountBuilder setClosedRegions(int closedRegions) {
      this.closedRegions = closedRegions;
      return this;
    }

    public RegionStatesCountBuilder setRegionsInTransition(int regionsInTransition) {
      this.regionsInTransition = regionsInTransition;
      return this;
    }

    public RegionStatesCountBuilder setTotalRegions(int totalRegions) {
      this.totalRegions = totalRegions;
      return this;
    }

    public RegionStatesCount build() {
      // Single all-args constructor keeps the value object immutable
      // (the original mutated the instance through private setters).
      return new RegionStatesCount(openRegions, splitRegions, closedRegions,
          regionsInTransition, totalRegions);
    }
  }

  @Override
  public String toString() {
    // Keep the exact historical format; log scrapers may rely on it.
    final StringBuilder sb = new StringBuilder("RegionStatesCount{");
    sb.append("openRegions=").append(openRegions);
    sb.append(", splitRegions=").append(splitRegions);
    sb.append(", closedRegions=").append(closedRegions);
    sb.append(", regionsInTransition=").append(regionsInTransition);
    sb.append(", totalRegions=").append(totalRegions);
    sb.append('}');
    return sb.toString();
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    RegionStatesCount that = (RegionStatesCount) o;
    return openRegions == that.openRegions
        && splitRegions == that.splitRegions
        && closedRegions == that.closedRegions
        && regionsInTransition == that.regionsInTransition
        && totalRegions == that.totalRegions;
  }

  @Override
  public int hashCode() {
    // Same 31-based combination as before so hash values stay stable.
    int result = openRegions;
    result = 31 * result + splitRegions;
    result = 31 * result + closedRegions;
    result = 31 * result + regionsInTransition;
    result = 31 * result + totalRegions;
    return result;
  }
}

View File

@ -80,6 +80,7 @@ import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionLoadStats; import org.apache.hadoop.hbase.client.RegionLoadStats;
import org.apache.hadoop.hbase.client.RegionStatesCount;
import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.SnapshotDescription; import org.apache.hadoop.hbase.client.SnapshotDescription;
@ -3309,4 +3310,51 @@ public final class ProtobufUtil {
} }
return Collections.emptySet(); return Collections.emptySet();
} }
/**
 * Convert a client-side {@link RegionStatesCount} to its protobuf counterpart.
 * A {@code null} input yields a message with every count set to zero.
 *
 * @param regionStatesCount client-side region states count, may be null
 * @return equivalent protobuf RegionStatesCount message
 */
public static ClusterStatusProtos.RegionStatesCount toTableRegionStatesCount(
    RegionStatesCount regionStatesCount) {
  final boolean present = regionStatesCount != null;
  return ClusterStatusProtos.RegionStatesCount.newBuilder()
    .setOpenRegions(present ? regionStatesCount.getOpenRegions() : 0)
    .setSplitRegions(present ? regionStatesCount.getSplitRegions() : 0)
    .setClosedRegions(present ? regionStatesCount.getClosedRegions() : 0)
    .setRegionsInTransition(present ? regionStatesCount.getRegionsInTransition() : 0)
    .setTotalRegions(present ? regionStatesCount.getTotalRegions() : 0)
    .build();
}
/**
 * Convert a protobuf RegionStatesCount message to the client-side
 * {@link RegionStatesCount}. A {@code null} input yields an instance with every
 * count set to zero.
 *
 * @param regionStatesCount protobuf region states count, may be null
 * @return equivalent client-side RegionStatesCount
 */
public static RegionStatesCount toTableRegionStatesCount(
    ClusterStatusProtos.RegionStatesCount regionStatesCount) {
  final boolean present = regionStatesCount != null;
  return new RegionStatesCount.RegionStatesCountBuilder()
    .setOpenRegions(present ? regionStatesCount.getOpenRegions() : 0)
    .setSplitRegions(present ? regionStatesCount.getSplitRegions() : 0)
    .setClosedRegions(present ? regionStatesCount.getClosedRegions() : 0)
    .setRegionsInTransition(present ? regionStatesCount.getRegionsInTransition() : 0)
    .setTotalRegions(present ? regionStatesCount.getTotalRegions() : 0)
    .build();
}
} }

View File

@ -231,6 +231,19 @@ message LiveServerInfo {
required ServerLoad server_load = 2; required ServerLoad server_load = 2;
} }
// Per-table counts of regions in the various assignment states.
message RegionStatesCount {
// Regions currently open.
required uint32 open_regions = 1;
// Regions currently split.
required uint32 split_regions = 2;
// Regions currently closed.
required uint32 closed_regions = 3;
// Regions currently in transition (neither stably open nor split).
required uint32 regions_in_transition = 4;
// Total regions of the table.
required uint32 total_regions = 5;
}
// Pairs a table with its RegionStatesCount; repeated inside ClusterStatus.
message TableRegionStatesCount {
required TableName table_name = 1;
required RegionStatesCount region_states_count = 2;
}
message ClusterStatus { message ClusterStatus {
optional HBaseVersionFileContent hbase_version = 1; optional HBaseVersionFileContent hbase_version = 1;
repeated LiveServerInfo live_servers = 2; repeated LiveServerInfo live_servers = 2;
@ -243,6 +256,7 @@ message ClusterStatus {
optional bool balancer_on = 9; optional bool balancer_on = 9;
optional int32 master_info_port = 10 [default = -1]; optional int32 master_info_port = 10 [default = -1];
repeated ServerName servers_name = 11; repeated ServerName servers_name = 11;
repeated TableRegionStatesCount table_region_states_count = 12;
} }
enum Option { enum Option {
@ -257,4 +271,5 @@ enum Option {
BALANCER_ON = 8; BALANCER_ON = 8;
MASTER_INFO_PORT = 9; MASTER_INFO_PORT = 9;
SERVERS_NAME = 10; SERVERS_NAME = 10;
TABLE_TO_REGIONS_COUNT = 11;
} }

View File

@ -83,6 +83,7 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionStatesCount;
import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.client.TableState;
@ -2562,6 +2563,24 @@ public class HMaster extends HRegionServer implements MasterServices {
} }
break; break;
} }
case TABLE_TO_REGIONS_COUNT: {
if (isActiveMaster() && isInitialized() && assignmentManager != null) {
try {
Map<TableName, RegionStatesCount> tableRegionStatesCountMap = new HashMap<>();
Map<String, TableDescriptor> tableDescriptorMap = getTableDescriptors().getAll();
for (TableDescriptor tableDescriptor : tableDescriptorMap.values()) {
TableName tableName = tableDescriptor.getTableName();
RegionStatesCount regionStatesCount = assignmentManager
.getRegionStatesCount(tableName);
tableRegionStatesCountMap.put(tableName, regionStatesCount);
}
builder.setTableRegionStatesCount(tableRegionStatesCountMap);
} catch (IOException e) {
LOG.error("Error while populating TABLE_TO_REGIONS_COUNT for Cluster Metrics..", e);
}
}
break;
}
} }
} }
return builder.build(); return builder.build();

View File

@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.DoNotRetryRegionException; import org.apache.hadoop.hbase.client.DoNotRetryRegionException;
import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionStatesCount;
import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.exceptions.UnexpectedStateException; import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
@ -2081,4 +2082,41 @@ public class AssignmentManager {
} }
return rsReportsSnapshot; return rsReportsSnapshot;
} }
/**
 * Provide the region states count for the given table,
 * e.g. how many regions of the given table are opened/closed/in transition etc.
 *
 * @param tableName table to summarize
 * @return region states count (all zeros when the table is disabled)
 */
public RegionStatesCount getRegionStatesCount(TableName tableName) {
  int opened = 0;
  int closed = 0;
  int split = 0;
  int inTransition = 0;
  int total = 0;
  if (!isTableDisabled(tableName)) {
    final List<RegionState> tableStates = regionStates.getTableRegionStates(tableName);
    for (final RegionState state : tableStates) {
      if (state.isOpened()) {
        opened++;
      } else if (state.isClosed()) {
        closed++;
      } else if (state.isSplit()) {
        split++;
      }
    }
    total = tableStates.size();
    // Every region that is neither opened nor split is counted as in transition.
    inTransition = total - opened - split;
  }
  return new RegionStatesCount.RegionStatesCountBuilder()
    .setOpenRegions(opened)
    .setClosedRegions(closed)
    .setSplitRegions(split)
    .setRegionsInTransition(inTransition)
    .setTotalRegions(total)
    .build();
}
} }

View File

@ -30,6 +30,9 @@ import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.AsyncAdmin; import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection; import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionStatesCount;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
@ -38,6 +41,7 @@ import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread; import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.junit.AfterClass; import org.junit.AfterClass;
@ -60,6 +64,9 @@ public class TestClientClusterMetrics {
private final static int MASTERS = 3; private final static int MASTERS = 3;
private static MiniHBaseCluster CLUSTER; private static MiniHBaseCluster CLUSTER;
private static HRegionServer DEAD; private static HRegionServer DEAD;
private static final TableName TABLE_NAME = TableName.valueOf("test");
private static final byte[] CF = Bytes.toBytes("cf");
@BeforeClass @BeforeClass
public static void setUpBeforeClass() throws Exception { public static void setUpBeforeClass() throws Exception {
@ -123,6 +130,11 @@ public class TestClientClusterMetrics {
defaults.getLiveServerMetrics().size()); defaults.getLiveServerMetrics().size());
Assert.assertEquals(origin.getMasterInfoPort(), defaults.getMasterInfoPort()); Assert.assertEquals(origin.getMasterInfoPort(), defaults.getMasterInfoPort());
Assert.assertEquals(origin.getServersName().size(), defaults.getServersName().size()); Assert.assertEquals(origin.getServersName().size(), defaults.getServersName().size());
origin.getTableRegionStatesCount().forEach(((tableName, regionStatesCount) -> {
RegionStatesCount defaultRegionStatesCount = defaults.getTableRegionStatesCount()
.get(tableName);
Assert.assertEquals(defaultRegionStatesCount, regionStatesCount);
}));
} }
} }
@ -167,6 +179,38 @@ public class TestClientClusterMetrics {
Assert.assertEquals(numRs, metrics.getServersName().size()); Assert.assertEquals(numRs, metrics.getServersName().size());
} }
@Test
public void testRegionStatesCount() throws Exception {
  // Create a user table with one region and load a few rows, then verify the
  // cluster metrics report correct per-table region state counts.
  // try-with-resources: the original leaked the Table reference.
  try (Table table = UTIL.createTable(TABLE_NAME, CF)) {
    table.put(new Put(Bytes.toBytes("k1"))
      .addColumn(CF, Bytes.toBytes("q1"), Bytes.toBytes("v1")));
    table.put(new Put(Bytes.toBytes("k2"))
      .addColumn(CF, Bytes.toBytes("q2"), Bytes.toBytes("v2")));
    table.put(new Put(Bytes.toBytes("k3"))
      .addColumn(CF, Bytes.toBytes("q3"), Bytes.toBytes("v3")));
  }
  ClusterMetrics metrics = ADMIN.getClusterMetrics();
  // JUnit's Assert.assertEquals takes (expected, actual); the original had the
  // arguments reversed, which produces misleading failure messages.
  Assert.assertEquals(3, metrics.getTableRegionStatesCount().size());

  RegionStatesCount metaCount =
    metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME);
  Assert.assertEquals(0, metaCount.getRegionsInTransition());
  Assert.assertEquals(1, metaCount.getOpenRegions());
  Assert.assertEquals(1, metaCount.getTotalRegions());
  Assert.assertEquals(0, metaCount.getClosedRegions());
  Assert.assertEquals(0, metaCount.getSplitRegions());

  RegionStatesCount tableCount = metrics.getTableRegionStatesCount().get(TABLE_NAME);
  Assert.assertEquals(0, tableCount.getRegionsInTransition());
  Assert.assertEquals(1, tableCount.getOpenRegions());
  Assert.assertEquals(1, tableCount.getTotalRegions());
  Assert.assertEquals(0, tableCount.getClosedRegions());
  Assert.assertEquals(0, tableCount.getSplitRegions());

  UTIL.deleteTable(TABLE_NAME);
}
@Test @Test
public void testMasterAndBackupMastersStatus() throws Exception { public void testMasterAndBackupMastersStatus() throws Exception {
// get all the master threads // get all the master threads
@ -224,6 +268,17 @@ public class TestClientClusterMetrics {
Assert.assertEquals(postCount + 1, MyObserver.POST_COUNT.get()); Assert.assertEquals(postCount + 1, MyObserver.POST_COUNT.get());
} }
/**
 * Load {@code rowCount} rows into {@code tableName} starting at {@code startRow}.
 * Row keys are the decimal string of (startRow + i); each row gets one cell in
 * family CF, qualifier "val1", with the loop index serialized as the value.
 *
 * @param tableName table to write to
 * @param startRow numeric value of the first row key
 * @param rowCount number of rows to insert
 * @throws IOException if the table cannot be obtained or a put fails
 */
private static void insertData(final TableName tableName, int startRow, int rowCount)
    throws IOException {
  // try-with-resources: the original never closed the Table, leaking it on
  // every invocation.
  try (Table t = UTIL.getConnection().getTable(tableName)) {
    for (int i = 0; i < rowCount; i++) {
      Put p = new Put(Bytes.toBytes("" + (startRow + i)));
      p.addColumn(CF, Bytes.toBytes("val1"), Bytes.toBytes(i));
      t.put(p);
    }
  }
}
public static class MyObserver implements MasterCoprocessor, MasterObserver { public static class MyObserver implements MasterCoprocessor, MasterObserver {
private static final AtomicInteger PRE_COUNT = new AtomicInteger(0); private static final AtomicInteger PRE_COUNT = new AtomicInteger(0);
private static final AtomicInteger POST_COUNT = new AtomicInteger(0); private static final AtomicInteger POST_COUNT = new AtomicInteger(0);

View File

@ -568,16 +568,21 @@ module Hbase
# Table should exist # Table should exist
raise(ArgumentError, "Can't find a table: #{table_name}") unless exists?(table_name) raise(ArgumentError, "Can't find a table: #{table_name}") unless exists?(table_name)
status = Pair.new
begin begin
status = @admin.getAlterStatus(org.apache.hadoop.hbase.TableName.valueOf(table_name)) cluster_metrics = @admin.getClusterMetrics
if status.getSecond != 0 table_region_status = cluster_metrics
puts "#{status.getSecond - status.getFirst}/#{status.getSecond} regions updated." .getTableRegionStatesCount
.get(org.apache.hadoop.hbase.TableName.valueOf(table_name))
if table_region_status.getTotalRegions != 0
updated_regions = table_region_status.getTotalRegions -
table_region_status.getRegionsInTransition -
table_region_status.getClosedRegions
puts "#{updated_regions}/#{table_region_status.getTotalRegions} regions updated."
else else
puts 'All regions updated.' puts 'All regions updated.'
end end
sleep 1 sleep 1
end while !status.nil? && status.getFirst != 0 end while !table_region_status.nil? && table_region_status.getRegionsInTransition != 0
puts 'Done.' puts 'Done.'
end end

View File

@ -110,6 +110,13 @@ module Hbase
#------------------------------------------------------------------------------- #-------------------------------------------------------------------------------
# alter_status now reads region counts from ClusterMetrics#getTableRegionStatesCount;
# a healthy single-region table should report all of its regions as updated.
define_test 'alter_status should work' do
output = capture_stdout { command(:alter_status, @test_name) }
assert(output.include?('1/1 regions updated'))
end
#-------------------------------------------------------------------------------
define_test "compact should work" do define_test "compact should work" do
command(:compact, 'hbase:meta') command(:compact, 'hbase:meta')
end end