diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java
index acced3ea82b..497ab938856 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java
@@ -23,6 +23,7 @@ import edu.umd.cs.findbugs.annotations.Nullable;
 import java.util.List;
 import java.util.Map;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionStatesCount;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -152,6 +153,14 @@ public interface ClusterMetrics {
     return (double)getRegionCount() / (double)serverSize;
   }
 
+  /**
+   * Provide region states count for the given table, e.g. how many regions of the given table
+   * are open/closed/in transition, etc.
+   *
+   * @return map of table to region states count
+   */
+  Map<TableName, RegionStatesCount> getTableRegionStatesCount();
+
   /**
    * Kinds of ClusterMetrics
    */
@@ -199,6 +208,10 @@ public interface ClusterMetrics {
     /**
      * metrics about live region servers name
      */
-    SERVERS_NAME
+    SERVERS_NAME,
+    /**
+     * metrics about the number of regions per table, grouped by region state
+     */
+    TABLE_TO_REGIONS_COUNT,
   }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java
index 30728ac4354..493fe71b8b0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java
@@ -26,6 +26,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.stream.Collectors;
+
+import org.apache.hadoop.hbase.client.RegionStatesCount;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -70,7 +72,14 @@ public final class ClusterMetricsBuilder {
         .collect(Collectors.toList()))
       .setMasterInfoPort(metrics.getMasterInfoPort())
       .addAllServersName(metrics.getServersName().stream().map(ProtobufUtil::toServerName)
-        .collect(Collectors.toList()));
+        .collect(Collectors.toList()))
+      .addAllTableRegionStatesCount(metrics.getTableRegionStatesCount().entrySet().stream()
+        .map(status ->
+          ClusterStatusProtos.TableRegionStatesCount.newBuilder()
+            .setTableName(ProtobufUtil.toProtoTableName((status.getKey())))
+            .setRegionStatesCount(ProtobufUtil.toTableRegionStatesCount(status.getValue()))
+            .build())
+        .collect(Collectors.toList()));
     if (metrics.getMasterName() != null) {
       builder.setMaster(ProtobufUtil.toServerName((metrics.getMasterName())));
     }
@@ -108,7 +117,12 @@ public final class ClusterMetricsBuilder {
         .map(HBaseProtos.Coprocessor::getName)
         .collect(Collectors.toList()))
       .setServerNames(proto.getServersNameList().stream().map(ProtobufUtil::toServerName)
-        .collect(Collectors.toList()));
+        .collect(Collectors.toList()))
+      .setTableRegionStatesCount(
+        proto.getTableRegionStatesCountList().stream()
+          .collect(Collectors.toMap(
+            e -> ProtobufUtil.toTableName(e.getTableName()),
+            e -> ProtobufUtil.toTableRegionStatesCount(e.getRegionStatesCount()))));
     if (proto.hasClusterId()) {
       builder.setClusterId(ClusterId.convert(proto.getClusterId()).toString());
     }
@@ -149,6 +163,7 @@ public final class ClusterMetricsBuilder {
       case BALANCER_ON: return ClusterMetrics.Option.BALANCER_ON;
       case SERVERS_NAME: return ClusterMetrics.Option.SERVERS_NAME;
      case MASTER_INFO_PORT: return ClusterMetrics.Option.MASTER_INFO_PORT;
+      case TABLE_TO_REGIONS_COUNT: return ClusterMetrics.Option.TABLE_TO_REGIONS_COUNT;
       // should not reach here
       default: throw new IllegalArgumentException("Invalid option: " + option);
     }
@@ -172,6 +187,7 @@ public final class ClusterMetricsBuilder {
       case BALANCER_ON: return ClusterStatusProtos.Option.BALANCER_ON;
       case SERVERS_NAME: return Option.SERVERS_NAME;
       case MASTER_INFO_PORT: return ClusterStatusProtos.Option.MASTER_INFO_PORT;
+      case TABLE_TO_REGIONS_COUNT: return ClusterStatusProtos.Option.TABLE_TO_REGIONS_COUNT;
       // should not reach here
       default: throw new IllegalArgumentException("Invalid option: " + option);
     }
@@ -214,6 +230,7 @@ public final class ClusterMetricsBuilder {
   private Boolean balancerOn;
   private int masterInfoPort;
   private List<ServerName> serversName = Collections.emptyList();
+  private Map<TableName, RegionStatesCount> tableRegionStatesCount = Collections.emptyMap();

   private ClusterMetricsBuilder() {
   }
@@ -263,6 +280,13 @@ public final class ClusterMetricsBuilder {
     this.serversName = serversName;
     return this;
   }
+
+  public ClusterMetricsBuilder setTableRegionStatesCount(
+    Map<TableName, RegionStatesCount> tableRegionStatesCount) {
+    this.tableRegionStatesCount = tableRegionStatesCount;
+    return this;
+  }
+
   public ClusterMetrics build() {
     return new ClusterMetricsImpl(
       hbaseVersion,
@@ -275,7 +299,9 @@ public final class ClusterMetricsBuilder {
       masterCoprocessorNames,
       balancerOn,
       masterInfoPort,
-      serversName);
+      serversName,
+      tableRegionStatesCount
+      );
   }

   private static class ClusterMetricsImpl implements ClusterMetrics {
     @Nullable
@@ -293,6 +319,7 @@ public final class ClusterMetricsBuilder {
     private final Boolean balancerOn;
     private final int masterInfoPort;
     private final List<ServerName> serversName;
+    private final Map<TableName, RegionStatesCount> tableRegionStatesCount;

     ClusterMetricsImpl(String hbaseVersion, List<ServerName> deadServerNames,
       Map<ServerName, ServerMetrics> liveServerMetrics,
@@ -303,7 +330,8 @@ public final class ClusterMetricsBuilder {
       List<String> masterCoprocessorNames,
       Boolean balancerOn,
       int masterInfoPort,
-      List<ServerName> serversName) {
+      List<ServerName> serversName,
+      Map<TableName, RegionStatesCount> tableRegionStatesCount) {
       this.hbaseVersion = hbaseVersion;
       this.deadServerNames = Preconditions.checkNotNull(deadServerNames);
       this.liveServerMetrics = Preconditions.checkNotNull(liveServerMetrics);
@@ -315,6 +343,7 @@ public final class ClusterMetricsBuilder {
       this.balancerOn = balancerOn;
       this.masterInfoPort = masterInfoPort;
       this.serversName = serversName;
+      this.tableRegionStatesCount = Preconditions.checkNotNull(tableRegionStatesCount);
     }

     @Override
@@ -372,6 +401,11 @@ public final class ClusterMetricsBuilder {
       return Collections.unmodifiableList(serversName);
     }

+    @Override
+    public Map<TableName, RegionStatesCount> getTableRegionStatesCount() {
+      return Collections.unmodifiableMap(tableRegionStatesCount);
+    }
+
     @Override
     public String toString() {
       StringBuilder sb = new StringBuilder(1024);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
index 33c30ddbb81..6fdb588a4f3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
@@ -26,6 +26,8 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
+
+import org.apache.hadoop.hbase.client.RegionStatesCount;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -349,6 +351,11 @@ public class ClusterStatus implements ClusterMetrics {
     return metrics.getServersName();
   }

+  @Override
+  public Map<TableName, RegionStatesCount> getTableRegionStatesCount() {
+    return metrics.getTableRegionStatesCount();
+  }
+
   @Override
   public String toString() {
     StringBuilder sb = new StringBuilder(1024);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionStatesCount.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionStatesCount.java
new file mode 100644
index 00000000000..1e1ce95113b
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionStatesCount.java
@@ -0,0 +1,167 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public final class RegionStatesCount {
+
+  private int openRegions;
+  private int splitRegions;
+  private int closedRegions;
+  private int regionsInTransition;
+  private int totalRegions;
+
+  private RegionStatesCount() {
+  }
+
+  public int getClosedRegions() {
+    return closedRegions;
+  }
+
+  public int getOpenRegions() {
+    return openRegions;
+  }
+
+  public int getSplitRegions() {
+    return splitRegions;
+  }
+
+  public int getRegionsInTransition() {
+    return regionsInTransition;
+  }
+
+  public int getTotalRegions() {
+    return totalRegions;
+  }
+
+  private void setClosedRegions(int closedRegions) {
+    this.closedRegions = closedRegions;
+  }
+
+  private void setOpenRegions(int openRegions) {
+    this.openRegions = openRegions;
+  }
+
+  private void setSplitRegions(int splitRegions) {
+    this.splitRegions = splitRegions;
+  }
+
+  private void setRegionsInTransition(int regionsInTransition) {
+    this.regionsInTransition = regionsInTransition;
+  }
+
+  private void setTotalRegions(int totalRegions) {
+    this.totalRegions = totalRegions;
+  }
+
+  public static class RegionStatesCountBuilder {
+    private int openRegions;
+    private int splitRegions;
+    private int closedRegions;
+    private int regionsInTransition;
+    private int totalRegions;
+
+    public RegionStatesCountBuilder setOpenRegions(int openRegions) {
+      this.openRegions = openRegions;
+      return this;
+    }
+
+    public RegionStatesCountBuilder setSplitRegions(int splitRegions) {
+      this.splitRegions = splitRegions;
+      return this;
+    }
+
+    public RegionStatesCountBuilder setClosedRegions(int closedRegions) {
+      this.closedRegions = closedRegions;
+      return this;
+    }
+
+    public RegionStatesCountBuilder setRegionsInTransition(int regionsInTransition) {
+      this.regionsInTransition = regionsInTransition;
+      return this;
+    }
+
+    public RegionStatesCountBuilder setTotalRegions(int totalRegions) {
+      this.totalRegions = totalRegions;
+      return this;
+    }
+
+    public RegionStatesCount build() {
+      RegionStatesCount regionStatesCount = new RegionStatesCount();
+      regionStatesCount.setOpenRegions(openRegions);
+      regionStatesCount.setClosedRegions(closedRegions);
+      regionStatesCount.setRegionsInTransition(regionsInTransition);
+      regionStatesCount.setSplitRegions(splitRegions);
+      regionStatesCount.setTotalRegions(totalRegions);
+      return regionStatesCount;
+    }
+  }
+
+  @Override
+  public String toString() {
+    final StringBuilder sb = new StringBuilder("RegionStatesCount{");
+    sb.append("openRegions=").append(openRegions);
+    sb.append(", splitRegions=").append(splitRegions);
+    sb.append(", closedRegions=").append(closedRegions);
+    sb.append(", regionsInTransition=").append(regionsInTransition);
+    sb.append(", totalRegions=").append(totalRegions);
+    sb.append('}');
+    return sb.toString();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    RegionStatesCount that = (RegionStatesCount) o;
+
+    if (openRegions != that.openRegions) {
+      return false;
+    }
+    if (splitRegions != that.splitRegions) {
+      return false;
+    }
+    if (closedRegions != that.closedRegions) {
+      return false;
+    }
+    if (regionsInTransition != that.regionsInTransition) {
+      return false;
+    }
+    return totalRegions == that.totalRegions;
+  }
+
+  @Override
+  public int hashCode() {
+    int result = openRegions;
+    result = 31 * result + splitRegions;
+    result = 31 * result + closedRegions;
+    result = 31 * result + regionsInTransition;
+    result = 31 * result + totalRegions;
+    return result;
+  }
+
+}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 36870b636b5..0a2063de729 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -80,6 +80,7 @@ import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionLoadStats;
+import org.apache.hadoop.hbase.client.RegionStatesCount;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.SnapshotDescription;
@@ -3309,4 +3310,51 @@ public final class ProtobufUtil {
     }
     return Collections.emptySet();
   }
+
+  public static ClusterStatusProtos.RegionStatesCount toTableRegionStatesCount(
+    RegionStatesCount regionStatesCount) {
+    int openRegions = 0;
+    int splitRegions = 0;
+    int closedRegions = 0;
+    int regionsInTransition = 0;
+    int totalRegions = 0;
+    if (regionStatesCount != null) {
+      openRegions = regionStatesCount.getOpenRegions();
+      splitRegions = regionStatesCount.getSplitRegions();
+      closedRegions = regionStatesCount.getClosedRegions();
+      regionsInTransition = regionStatesCount.getRegionsInTransition();
+      totalRegions = regionStatesCount.getTotalRegions();
+    }
+    return ClusterStatusProtos.RegionStatesCount.newBuilder()
+      .setOpenRegions(openRegions)
+      .setSplitRegions(splitRegions)
+      .setClosedRegions(closedRegions)
+      .setRegionsInTransition(regionsInTransition)
+      .setTotalRegions(totalRegions)
+      .build();
+  }
+
+  public static RegionStatesCount toTableRegionStatesCount(
+    ClusterStatusProtos.RegionStatesCount regionStatesCount) {
+    int openRegions = 0;
+    int splitRegions = 0;
+    int closedRegions = 0;
+    int regionsInTransition = 0;
+    int totalRegions = 0;
+    if (regionStatesCount != null) {
+      closedRegions = regionStatesCount.getClosedRegions();
+      regionsInTransition = regionStatesCount.getRegionsInTransition();
+      openRegions = regionStatesCount.getOpenRegions();
+      splitRegions = regionStatesCount.getSplitRegions();
+      totalRegions = regionStatesCount.getTotalRegions();
+    }
+    return new RegionStatesCount.RegionStatesCountBuilder()
+      .setOpenRegions(openRegions)
+      .setSplitRegions(splitRegions)
+      .setClosedRegions(closedRegions)
+      .setRegionsInTransition(regionsInTransition)
+      .setTotalRegions(totalRegions)
+      .build();
+  }
+
 }
diff --git a/hbase-protocol-shaded/src/main/protobuf/ClusterStatus.proto b/hbase-protocol-shaded/src/main/protobuf/ClusterStatus.proto
index d1487af61c0..30315beb374 100644
--- a/hbase-protocol-shaded/src/main/protobuf/ClusterStatus.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/ClusterStatus.proto
@@ -231,6 +231,19 @@ message LiveServerInfo {
   required ServerLoad server_load = 2;
 }
 
+message RegionStatesCount {
+  required uint32 open_regions = 1;
+  required uint32 split_regions = 2;
+  required uint32 closed_regions = 3;
+  required uint32 regions_in_transition = 4;
+  required uint32 total_regions = 5;
+}
+
+message TableRegionStatesCount {
+  required TableName table_name = 1;
+  required RegionStatesCount region_states_count = 2;
+}
+
 message ClusterStatus {
   optional HBaseVersionFileContent hbase_version = 1;
   repeated LiveServerInfo live_servers = 2;
@@ -243,6 +256,7 @@
   optional bool balancer_on = 9;
   optional int32 master_info_port = 10 [default = -1];
   repeated ServerName servers_name = 11;
+  repeated TableRegionStatesCount table_region_states_count = 12;
 }
 
 enum Option {
@@ -257,4 +271,5 @@
   BALANCER_ON = 8;
   MASTER_INFO_PORT = 9;
   SERVERS_NAME = 10;
+  TABLE_TO_REGIONS_COUNT = 11;
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 882e178dd27..7ba84b1466c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -83,6 +83,7 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.RegionStatesCount;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.client.TableState;
@@ -2562,6 +2563,24 @@ public class HMaster extends HRegionServer implements MasterServices {
         }
         break;
       }
+      case TABLE_TO_REGIONS_COUNT: {
+        if (isActiveMaster() && isInitialized() && assignmentManager != null) {
+          try {
+            Map<TableName, RegionStatesCount> tableRegionStatesCountMap = new HashMap<>();
+            Map<String, TableDescriptor> tableDescriptorMap = getTableDescriptors().getAll();
+            for (TableDescriptor tableDescriptor : tableDescriptorMap.values()) {
+              TableName tableName = tableDescriptor.getTableName();
+              RegionStatesCount regionStatesCount = assignmentManager
+                .getRegionStatesCount(tableName);
+              tableRegionStatesCountMap.put(tableName, regionStatesCount);
+            }
+            builder.setTableRegionStatesCount(tableRegionStatesCountMap);
+          } catch (IOException e) {
+            LOG.error("Error while populating TABLE_TO_REGIONS_COUNT for Cluster Metrics..", e);
+          }
+        }
+        break;
+      }
     }
   }
   return builder.build();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index 6889352acdb..bb3fc28cb57 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.client.DoNotRetryRegionException;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.RegionStatesCount;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
@@ -2081,4 +2082,41 @@ public class AssignmentManager {
     }
     return rsReportsSnapshot;
   }
+
+  /**
+   * Provide region states count for the given table, e.g. how many regions of the given table
+   * are open/closed/in transition, etc.
+   *
+   * @param tableName TableName
+   * @return region states count
+   */
+  public RegionStatesCount getRegionStatesCount(TableName tableName) {
+    int openRegionsCount = 0;
+    int closedRegionCount = 0;
+    int ritCount = 0;
+    int splitRegionCount = 0;
+    int totalRegionCount = 0;
+    if (!isTableDisabled(tableName)) {
+      final List<RegionState> states = regionStates.getTableRegionStates(tableName);
+      for (RegionState regionState : states) {
+        if (regionState.isOpened()) {
+          openRegionsCount++;
+        } else if (regionState.isClosed()) {
+          closedRegionCount++;
+        } else if (regionState.isSplit()) {
+          splitRegionCount++;
+        }
+      }
+      totalRegionCount = states.size();
+      ritCount = totalRegionCount - openRegionsCount - splitRegionCount;
+    }
+    return new RegionStatesCount.RegionStatesCountBuilder()
+      .setOpenRegions(openRegionsCount)
+      .setClosedRegions(closedRegionCount)
+      .setSplitRegions(splitRegionCount)
+      .setRegionsInTransition(ritCount)
+      .setTotalRegions(totalRegionCount)
+      .build();
+  }
+
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
index 2ea03a6b1b9..cbba5054140 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
@@ -30,6 +30,9 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.AsyncAdmin;
 import org.apache.hadoop.hbase.client.AsyncConnection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionStatesCount;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
@@ -38,6 +41,7 @@ import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.junit.AfterClass;
@@ -60,6 +64,9 @@ public class TestClientClusterMetrics {
   private final static int MASTERS = 3;
   private static MiniHBaseCluster CLUSTER;
   private static HRegionServer DEAD;
+  private static final TableName TABLE_NAME = TableName.valueOf("test");
+  private static final byte[] CF = Bytes.toBytes("cf");
+
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
@@ -123,6 +130,11 @@
         defaults.getLiveServerMetrics().size());
       Assert.assertEquals(origin.getMasterInfoPort(), defaults.getMasterInfoPort());
       Assert.assertEquals(origin.getServersName().size(), defaults.getServersName().size());
+      origin.getTableRegionStatesCount().forEach(((tableName, regionStatesCount) -> {
+        RegionStatesCount defaultRegionStatesCount = defaults.getTableRegionStatesCount()
+          .get(tableName);
+        Assert.assertEquals(defaultRegionStatesCount, regionStatesCount);
+      }));
     }
   }
@@ -167,6 +179,38 @@
     Assert.assertEquals(numRs, metrics.getServersName().size());
   }
 
+  @Test
+  public void testRegionStatesCount() throws Exception {
+    Table table = UTIL.createTable(TABLE_NAME, CF);
+    table.put(new Put(Bytes.toBytes("k1"))
+      .addColumn(CF, Bytes.toBytes("q1"), Bytes.toBytes("v1")));
+    table.put(new Put(Bytes.toBytes("k2"))
+      .addColumn(CF, Bytes.toBytes("q2"), Bytes.toBytes("v2")));
+    table.put(new Put(Bytes.toBytes("k3"))
+      .addColumn(CF, Bytes.toBytes("q3"), Bytes.toBytes("v3")));
+
+    ClusterMetrics metrics = ADMIN.getClusterMetrics();
+    Assert.assertEquals(metrics.getTableRegionStatesCount().size(), 3);
+    Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME)
+      .getRegionsInTransition(), 0);
+    Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME)
+      .getOpenRegions(), 1);
+    Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME)
+      .getTotalRegions(), 1);
+    Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME)
+      .getClosedRegions(), 0);
+    Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME)
+      .getSplitRegions(), 0);
+    Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME)
+      .getRegionsInTransition(), 0);
+    Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME)
+      .getOpenRegions(), 1);
+    Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME)
+      .getTotalRegions(), 1);
+
+    UTIL.deleteTable(TABLE_NAME);
+  }
+
   @Test
   public void testMasterAndBackupMastersStatus() throws Exception {
     // get all the master threads
@@ -224,6 +268,17 @@
     Assert.assertEquals(postCount + 1, MyObserver.POST_COUNT.get());
   }
 
+  private static void insertData(final TableName tableName, int startRow, int rowCount)
+    throws IOException {
+    Table t = UTIL.getConnection().getTable(tableName);
+    Put p;
+    for (int i = 0; i < rowCount; i++) {
+      p = new Put(Bytes.toBytes("" + (startRow + i)));
+      p.addColumn(CF, Bytes.toBytes("val1"), Bytes.toBytes(i));
+      t.put(p);
+    }
+  }
+
   public static class MyObserver implements MasterCoprocessor, MasterObserver {
     private static final AtomicInteger PRE_COUNT = new AtomicInteger(0);
     private static final AtomicInteger POST_COUNT = new AtomicInteger(0);
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index 16049580cb9..ba84893d01a 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -568,16 +568,21 @@ module Hbase
       # Table should exist
       raise(ArgumentError, "Can't find a table: #{table_name}") unless exists?(table_name)

-      status = Pair.new
       begin
-        status = @admin.getAlterStatus(org.apache.hadoop.hbase.TableName.valueOf(table_name))
-        if status.getSecond != 0
-          puts "#{status.getSecond - status.getFirst}/#{status.getSecond} regions updated."
+        cluster_metrics = @admin.getClusterMetrics
+        table_region_status = cluster_metrics
+                              .getTableRegionStatesCount
+                              .get(org.apache.hadoop.hbase.TableName.valueOf(table_name))
+        if table_region_status.getTotalRegions != 0
+          updated_regions = table_region_status.getTotalRegions -
+                            table_region_status.getRegionsInTransition -
+                            table_region_status.getClosedRegions
+          puts "#{updated_regions}/#{table_region_status.getTotalRegions} regions updated."
         else
           puts 'All regions updated.'
         end
         sleep 1
-      end while !status.nil? && status.getFirst != 0
+      end while !table_region_status.nil? && table_region_status.getRegionsInTransition != 0
       puts 'Done.'
     end
diff --git a/hbase-shell/src/test/ruby/hbase/admin_test.rb b/hbase-shell/src/test/ruby/hbase/admin_test.rb
index c9c33e94719..b15449d3ecb 100644
--- a/hbase-shell/src/test/ruby/hbase/admin_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/admin_test.rb
@@ -110,6 +110,13 @@ module Hbase

   #-------------------------------------------------------------------------------

+  define_test 'alter_status should work' do
+    output = capture_stdout { command(:alter_status, @test_name) }
+    assert(output.include?('1/1 regions updated'))
+  end
+
+  #-------------------------------------------------------------------------------
+
   define_test "compact should work" do
     command(:compact, 'hbase:meta')
   end
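For reference, a minimal client-side sketch of how the new option and accessor introduced above could be exercised once the patch is applied. This is not part of the patch itself; the class name and the printed fields are illustrative only.

import java.util.EnumSet;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionStatesCount;

// Hypothetical example class, not part of the patch.
public final class TableRegionStatesCountExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Ask the master only for the new per-table region states counts.
      ClusterMetrics metrics =
          admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.TABLE_TO_REGIONS_COUNT));
      for (Map.Entry<TableName, RegionStatesCount> entry
          : metrics.getTableRegionStatesCount().entrySet()) {
        RegionStatesCount count = entry.getValue();
        System.out.println(entry.getKey() + ": open=" + count.getOpenRegions()
            + ", rit=" + count.getRegionsInTransition()
            + ", total=" + count.getTotalRegions());
      }
    }
  }
}

This is also the code path the reworked alter_status shell command relies on: it reads getTableRegionStatesCount from ClusterMetrics and loops until the table has no regions in transition.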