diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java new file mode 100644 index 00000000000..103c107ddfe --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java @@ -0,0 +1,198 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.List; +import java.util.Map; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Metrics information on the HBase cluster. + *

+ * ClusterMetrics provides clients with information such as: + *

+ * {@link Option} provides a way to get desired ClusterMetrics information.
+ * The following code will get all the cluster information.
+ * <pre>
+ * {@code
+ * // Original version still works
+ * Admin admin = connection.getAdmin();
+ * ClusterMetrics metrics = admin.getClusterStatus();
+ * // or below, a new version which has the same effects
+ * ClusterMetrics metrics = admin.getClusterStatus(EnumSet.allOf(Option.class));
+ * }
+ * </pre>
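+ * The returned {@code metrics} can then be inspected; a minimal sketch
+ * (illustrative only, using the getters defined on this interface):
+ * <pre>
+ * {@code
+ * System.out.println("version: " + metrics.getHBaseVersion());
+ * System.out.println("regions: " + metrics.getRegionCount());
+ * System.out.println("average load: " + metrics.getAverageLoad());
+ * }
+ * </pre>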
+ * If information about live servers is all that is wanted,
+ * the following code will retrieve it:
+ * <pre>
+ * {@code
+ * Admin admin = connection.getAdmin();
+ * ClusterMetrics metrics = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
+ * }
+ * </pre>
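+ * Continuing from the snippet above, a minimal sketch that walks the
+ * per-server metrics returned by the LIVE_SERVERS option:
+ * <pre>
+ * {@code
+ * metrics.getLiveServerMetrics().forEach((serverName, serverMetrics) ->
+ *     System.out.println(serverName + " hosts "
+ *         + serverMetrics.getRegionMetrics().size() + " regions"));
+ * }
+ * </pre>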
+ */ +@InterfaceAudience.Public +public interface ClusterMetrics { + + /** + * @return the HBase version string as reported by the HMaster + */ + @Nullable + String getHBaseVersion(); + + /** + * @return the names of region servers on the dead list + */ + List getDeadServerNames(); + + /** + * @return the names of region servers on the live list + */ + Map getLiveServerMetrics(); + + /** + * @return the number of regions deployed on the cluster + */ + default int getRegionCount() { + return getLiveServerMetrics().entrySet().stream() + .mapToInt(v -> v.getValue().getRegionMetrics().size()).sum(); + } + + /** + * @return the number of requests since last report + */ + default long getRequestCount() { + return getLiveServerMetrics().entrySet().stream() + .flatMap(v -> v.getValue().getRegionMetrics().values().stream()) + .mapToLong(RegionMetrics::getRequestCount).sum(); + } + + /** + * Returns detailed information about the current master {@link ServerName}. + * @return current master information if it exists + */ + @Nullable + ServerName getMasterName(); + + /** + * @return the names of backup masters + */ + List getBackupMasterNames(); + + @InterfaceAudience.Private + List getRegionStatesInTransition(); + + @Nullable + String getClusterId(); + + List getMasterCoprocessorNames(); + + default long getLastMajorCompactionTimestamp(TableName table) { + return getLiveServerMetrics().values().stream() + .flatMap(s -> s.getRegionMetrics().values().stream()) + .filter(r -> RegionInfo.getTable(r.getRegionName()).equals(table)) + .mapToLong(RegionMetrics::getLastMajorCompactionTimestamp).min().orElse(0); + } + + default long getLastMajorCompactionTimestamp(byte[] regionName) { + return getLiveServerMetrics().values().stream() + .filter(s -> s.getRegionMetrics().containsKey(regionName)) + .findAny() + .map(s -> s.getRegionMetrics().get(regionName).getLastMajorCompactionTimestamp()) + .orElse(0L); + } + + @Nullable + Boolean getBalancerOn(); + + int getMasterInfoPort(); + + /** + * @return the average cluster load + */ + default double getAverageLoad() { + int serverSize = getLiveServerMetrics().size(); + if (serverSize == 0) { + return 0; + } + return (double)getRegionCount() / (double)serverSize; + } + + /** + * Kinds of ClusterMetrics + */ + enum Option { + /** + * metrics about hbase version + */ + HBASE_VERSION, + /** + * metrics about cluster id + */ + CLUSTER_ID, + /** + * metrics about balancer is on or not + */ + BALANCER_ON, + /** + * metrics about live region servers + */ + LIVE_SERVERS, + /** + * metrics about dead region servers + */ + DEAD_SERVERS, + /** + * metrics about master name + */ + MASTER, + /** + * metrics about backup masters name + */ + BACKUP_MASTERS, + /** + * metrics about master coprocessors + */ + MASTER_COPROCESSORS, + /** + * metrics about regions in transition + */ + REGIONS_IN_TRANSITION, + /** + * metrics info port + */ + MASTER_INFO_PORT + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java new file mode 100644 index 00000000000..ed669a56598 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java @@ -0,0 +1,397 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Collections; +import java.util.EnumSet; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.stream.Collectors; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions; +import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.FSProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; + +@InterfaceAudience.Private +public final class ClusterMetricsBuilder { + + public static ClusterStatusProtos.ClusterStatus toClusterStatus(ClusterMetrics metrics) { + ClusterStatusProtos.ClusterStatus.Builder builder + = ClusterStatusProtos.ClusterStatus.newBuilder() + .addAllBackupMasters(metrics.getBackupMasterNames().stream() + .map(ProtobufUtil::toServerName).collect(Collectors.toList())) + .addAllDeadServers(metrics.getDeadServerNames().stream() + .map(ProtobufUtil::toServerName).collect(Collectors.toList())) + .addAllLiveServers(metrics.getLiveServerMetrics().entrySet().stream() + .map(s -> ClusterStatusProtos.LiveServerInfo + .newBuilder() + .setServer(ProtobufUtil.toServerName(s.getKey())) + .setServerLoad(ServerMetricsBuilder.toServerLoad(s.getValue())) + .build()) + .collect(Collectors.toList())) + .addAllMasterCoprocessors(metrics.getMasterCoprocessorNames().stream() + .map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build()) + .collect(Collectors.toList())) + .addAllRegionsInTransition(metrics.getRegionStatesInTransition().stream() + .map(r -> ClusterStatusProtos.RegionInTransition + .newBuilder() + .setSpec(HBaseProtos.RegionSpecifier + .newBuilder() + .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) + .setValue(UnsafeByteOperations.unsafeWrap(r.getRegion().getRegionName())) + .build()) + .setRegionState(r.convert()) + .build()) + .collect(Collectors.toList())) + .setMasterInfoPort(metrics.getMasterInfoPort()); + if (metrics.getMasterName() != null) { + builder.setMaster(ProtobufUtil.toServerName((metrics.getMasterName()))); + } + if (metrics.getBalancerOn() != null) { + builder.setBalancerOn(metrics.getBalancerOn()); + } + if (metrics.getClusterId() != null) { + builder.setClusterId(new ClusterId(metrics.getClusterId()).convert()); + } + if (metrics.getHBaseVersion() != null) { + builder.setHbaseVersion( + FSProtos.HBaseVersionFileContent.newBuilder() + .setVersion(metrics.getHBaseVersion())); + } + return builder.build(); + } + + public static ClusterMetrics toClusterMetrics( + ClusterStatusProtos.ClusterStatus proto) { + ClusterMetricsBuilder builder = 
ClusterMetricsBuilder.newBuilder(); + builder.setLiveServerMetrics(proto.getLiveServersList().stream() + .collect(Collectors.toMap(e -> ProtobufUtil.toServerName(e.getServer()), + ServerMetricsBuilder::toServerMetrics))) + .setDeadServerNames(proto.getDeadServersList().stream() + .map(ProtobufUtil::toServerName) + .collect(Collectors.toList())) + .setBackerMasterNames(proto.getBackupMastersList().stream() + .map(ProtobufUtil::toServerName) + .collect(Collectors.toList())) + .setRegionsInTransition(proto.getRegionsInTransitionList().stream() + .map(ClusterStatusProtos.RegionInTransition::getRegionState) + .map(RegionState::convert) + .collect(Collectors.toList())) + .setMasterCoprocessorNames(proto.getMasterCoprocessorsList().stream() + .map(HBaseProtos.Coprocessor::getName) + .collect(Collectors.toList())); + if (proto.hasClusterId()) { + builder.setClusterId(ClusterId.convert(proto.getClusterId()).toString()); + } + + if (proto.hasHbaseVersion()) { + builder.setHBaseVersion(proto.getHbaseVersion().getVersion()); + } + + if (proto.hasMaster()) { + builder.setMasterName(ProtobufUtil.toServerName(proto.getMaster())); + } + + if (proto.hasBalancerOn()) { + builder.setBalancerOn(proto.getBalancerOn()); + } + + if (proto.hasMasterInfoPort()) { + builder.setMasterInfoPort(proto.getMasterInfoPort()); + } + return builder.build(); + } + + /** + * Convert ClusterStatusProtos.Option to ClusterMetrics.Option + * @param option a ClusterStatusProtos.Option + * @return converted ClusterMetrics.Option + */ + public static ClusterMetrics.Option toOption(ClusterStatusProtos.Option option) { + switch (option) { + case HBASE_VERSION: return ClusterMetrics.Option.HBASE_VERSION; + case LIVE_SERVERS: return ClusterMetrics.Option.LIVE_SERVERS; + case DEAD_SERVERS: return ClusterMetrics.Option.DEAD_SERVERS; + case REGIONS_IN_TRANSITION: return ClusterMetrics.Option.REGIONS_IN_TRANSITION; + case CLUSTER_ID: return ClusterMetrics.Option.CLUSTER_ID; + case MASTER_COPROCESSORS: return ClusterMetrics.Option.MASTER_COPROCESSORS; + case MASTER: return ClusterMetrics.Option.MASTER; + case BACKUP_MASTERS: return ClusterMetrics.Option.BACKUP_MASTERS; + case BALANCER_ON: return ClusterMetrics.Option.BALANCER_ON; + case MASTER_INFO_PORT: return ClusterMetrics.Option.MASTER_INFO_PORT; + // should not reach here + default: throw new IllegalArgumentException("Invalid option: " + option); + } + } + + /** + * Convert ClusterMetrics.Option to ClusterStatusProtos.Option + * @param option a ClusterMetrics.Option + * @return converted ClusterStatusProtos.Option + */ + public static ClusterStatusProtos.Option toOption(ClusterMetrics.Option option) { + switch (option) { + case HBASE_VERSION: return ClusterStatusProtos.Option.HBASE_VERSION; + case LIVE_SERVERS: return ClusterStatusProtos.Option.LIVE_SERVERS; + case DEAD_SERVERS: return ClusterStatusProtos.Option.DEAD_SERVERS; + case REGIONS_IN_TRANSITION: return ClusterStatusProtos.Option.REGIONS_IN_TRANSITION; + case CLUSTER_ID: return ClusterStatusProtos.Option.CLUSTER_ID; + case MASTER_COPROCESSORS: return ClusterStatusProtos.Option.MASTER_COPROCESSORS; + case MASTER: return ClusterStatusProtos.Option.MASTER; + case BACKUP_MASTERS: return ClusterStatusProtos.Option.BACKUP_MASTERS; + case BALANCER_ON: return ClusterStatusProtos.Option.BALANCER_ON; + case MASTER_INFO_PORT: return ClusterStatusProtos.Option.MASTER_INFO_PORT; + // should not reach here + default: throw new IllegalArgumentException("Invalid option: " + option); + } + } + + /** + * Convert a list of 
ClusterStatusProtos.Option to an enum set of ClusterMetrics.Option + * @param options the pb options + * @return an enum set of ClusterMetrics.Option + */ + public static EnumSet toOptions(List options) { + return options.stream().map(ClusterMetricsBuilder::toOption) + .collect(Collectors.toCollection(() -> EnumSet.noneOf(ClusterMetrics.Option.class))); + } + + /** + * Convert an enum set of ClusterMetrics.Option to a list of ClusterStatusProtos.Option + * @param options the ClusterMetrics options + * @return a list of ClusterStatusProtos.Option + */ + public static List toOptions(EnumSet options) { + return options.stream().map(ClusterMetricsBuilder::toOption).collect(Collectors.toList()); + } + + public static ClusterMetricsBuilder newBuilder() { + return new ClusterMetricsBuilder(); + } + @Nullable + private String hbaseVersion; + private List deadServerNames = Collections.emptyList(); + private Map liveServerMetrics = new TreeMap<>(); + @Nullable + private ServerName masterName; + private List backupMasterNames = Collections.emptyList(); + private List regionsInTransition = Collections.emptyList(); + @Nullable + private String clusterId; + private List masterCoprocessorNames = Collections.emptyList(); + @Nullable + private Boolean balancerOn; + private int masterInfoPort; + + private ClusterMetricsBuilder() { + } + public ClusterMetricsBuilder setHBaseVersion(String value) { + this.hbaseVersion = value; + return this; + } + public ClusterMetricsBuilder setDeadServerNames(List value) { + this.deadServerNames = value; + return this; + } + + public ClusterMetricsBuilder setLiveServerMetrics(Map value) { + liveServerMetrics.putAll(value); + return this; + } + + public ClusterMetricsBuilder setMasterName(ServerName value) { + this.masterName = value; + return this; + } + public ClusterMetricsBuilder setBackerMasterNames(List value) { + this.backupMasterNames = value; + return this; + } + public ClusterMetricsBuilder setRegionsInTransition(List value) { + this.regionsInTransition = value; + return this; + } + public ClusterMetricsBuilder setClusterId(String value) { + this.clusterId = value; + return this; + } + public ClusterMetricsBuilder setMasterCoprocessorNames(List value) { + this.masterCoprocessorNames = value; + return this; + } + public ClusterMetricsBuilder setBalancerOn(@Nullable Boolean value) { + this.balancerOn = value; + return this; + } + public ClusterMetricsBuilder setMasterInfoPort(int value) { + this.masterInfoPort = value; + return this; + } + public ClusterMetrics build() { + return new ClusterMetricsImpl( + hbaseVersion, + deadServerNames, + liveServerMetrics, + masterName, + backupMasterNames, + regionsInTransition, + clusterId, + masterCoprocessorNames, + balancerOn, + masterInfoPort); + } + private static class ClusterMetricsImpl implements ClusterMetrics { + @Nullable + private final String hbaseVersion; + private final List deadServerNames; + private final Map liveServerMetrics; + @Nullable + private final ServerName masterName; + private final List backupMasterNames; + private final List regionsInTransition; + @Nullable + private final String clusterId; + private final List masterCoprocessorNames; + @Nullable + private final Boolean balancerOn; + private final int masterInfoPort; + + ClusterMetricsImpl(String hbaseVersion, List deadServerNames, + Map liveServerMetrics, + ServerName masterName, + List backupMasterNames, + List regionsInTransition, + String clusterId, + List masterCoprocessorNames, + Boolean balancerOn, + int masterInfoPort) { + this.hbaseVersion = 
hbaseVersion; + this.deadServerNames = Preconditions.checkNotNull(deadServerNames); + this.liveServerMetrics = Preconditions.checkNotNull(liveServerMetrics); + this.masterName = masterName; + this.backupMasterNames = Preconditions.checkNotNull(backupMasterNames); + this.regionsInTransition = Preconditions.checkNotNull(regionsInTransition); + this.clusterId = clusterId; + this.masterCoprocessorNames = Preconditions.checkNotNull(masterCoprocessorNames); + this.balancerOn = balancerOn; + this.masterInfoPort = masterInfoPort; + } + + @Override + public String getHBaseVersion() { + return hbaseVersion; + } + + @Override + public List getDeadServerNames() { + return Collections.unmodifiableList(deadServerNames); + } + + @Override + public Map getLiveServerMetrics() { + return Collections.unmodifiableMap(liveServerMetrics); + } + + @Override + public ServerName getMasterName() { + return masterName; + } + + @Override + public List getBackupMasterNames() { + return Collections.unmodifiableList(backupMasterNames); + } + + @Override + public List getRegionStatesInTransition() { + return Collections.unmodifiableList(regionsInTransition); + } + + @Override + public String getClusterId() { + return clusterId; + } + + @Override + public List getMasterCoprocessorNames() { + return Collections.unmodifiableList(masterCoprocessorNames); + } + + @Override + public Boolean getBalancerOn() { + return balancerOn; + } + + @Override + public int getMasterInfoPort() { + return masterInfoPort; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(1024); + sb.append("Master: " + getMasterName()); + + int backupMastersSize = getBackupMasterNames().size(); + sb.append("\nNumber of backup masters: " + backupMastersSize); + if (backupMastersSize > 0) { + for (ServerName serverName: getBackupMasterNames()) { + sb.append("\n " + serverName); + } + } + + int serversSize = getLiveServerMetrics().size(); + sb.append("\nNumber of live region servers: " + serversSize); + if (serversSize > 0) { + for (ServerName serverName : getLiveServerMetrics().keySet()) { + sb.append("\n " + serverName.getServerName()); + } + } + + int deadServerSize = getDeadServerNames().size(); + sb.append("\nNumber of dead region servers: " + deadServerSize); + if (deadServerSize > 0) { + for (ServerName serverName : getDeadServerNames()) { + sb.append("\n " + serverName); + } + } + + sb.append("\nAverage load: " + getAverageLoad()); + sb.append("\nNumber of requests: " + getRequestCount()); + sb.append("\nNumber of regions: " + getRegionCount()); + + int ritSize = getRegionStatesInTransition().size(); + sb.append("\nNumber of regions in transition: " + ritSize); + if (ritSize > 0) { + for (RegionState state : getRegionStatesInTransition()) { + sb.append("\n " + state.toDescriptiveString()); + } + } + return sb.toString(); + } + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java index 6b4c03abc54..8cbf302c757 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java @@ -19,16 +19,17 @@ package org.apache.hadoop.hbase; +import edu.umd.cs.findbugs.annotations.Nullable; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; +import org.apache.hadoop.hbase.master.RegionState; 
+import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.shaded.com.google.common.base.Objects; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.master.RegionState; /** * Status information on the HBase cluster. @@ -47,7 +48,7 @@ import org.apache.hadoop.hbase.master.RegionState; *
 * <li>Regions in transition at master</li>
 * <li>The unique cluster ID</li>
 * </ul>
- * {@link Option} provides a way to get desired ClusterStatus information.
+ * {@link ClusterMetrics.Option} provides a way to get desired ClusterStatus information.
 * The following codes will get all the cluster information.
 * <pre>
 * {@code
@@ -66,28 +67,20 @@ import org.apache.hadoop.hbase.master.RegionState;
 * ClusterStatus status = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
 * }
 * </pre>
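+ * <p>
+ * An illustrative migration sketch (assuming an {@code Admin} named
+ * {@code admin}); the replacement interface exposes the same data:
+ * <pre>
+ * {@code
+ * ClusterMetrics metrics = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
+ * int liveServers = metrics.getLiveServerMetrics().size(); // replaces getServersSize()
+ * }
+ * </pre>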
    + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link ClusterMetrics} instead. */ @InterfaceAudience.Public -public class ClusterStatus { +@Deprecated +public class ClusterStatus implements ClusterMetrics { // TODO: remove this in 3.0 private static final byte VERSION = 2; - private String hbaseVersion; - private Map liveServers; - private List deadServers; - private ServerName master; - private List backupMasters; - private List intransition; - private String clusterId; - private String[] masterCoprocessors; - private Boolean balancerOn; - private int masterInfoPort; + private final ClusterMetrics metrics; /** - * Use {@link ClusterStatus.Builder} to construct a ClusterStatus instead. * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * (HBASE-15511). */ @Deprecated public ClusterStatus(final String hbaseVersion, final String clusterid, @@ -100,61 +93,52 @@ public class ClusterStatus { final Boolean balancerOn, final int masterInfoPort) { // TODO: make this constructor private - this.hbaseVersion = hbaseVersion; - this.liveServers = servers; - this.deadServers = new ArrayList<>(deadServers); - this.master = master; - this.backupMasters = new ArrayList<>(backupMasters); - this.intransition = rit; - this.clusterId = clusterid; - this.masterCoprocessors = masterCoprocessors; - this.balancerOn = balancerOn; - this.masterInfoPort = masterInfoPort; + this(ClusterMetricsBuilder.newBuilder().setHBaseVersion(hbaseVersion) + .setDeadServerNames(new ArrayList<>(deadServers)) + .setLiveServerMetrics(servers.entrySet().stream() + .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue()))) + .setBackerMasterNames(new ArrayList<>(backupMasters)).setBalancerOn(balancerOn) + .setClusterId(clusterid) + .setMasterCoprocessorNames(Arrays.asList(masterCoprocessors)) + .setMasterName(master) + .setMasterInfoPort(masterInfoPort) + .setRegionsInTransition(rit) + .build()); } - private ClusterStatus(final String hbaseVersion, final String clusterid, - final Map servers, - final List deadServers, - final ServerName master, - final List backupMasters, - final List rit, - final String[] masterCoprocessors, - final Boolean balancerOn, - final int masterInfoPort) { - this.hbaseVersion = hbaseVersion; - this.liveServers = servers; - this.deadServers = deadServers; - this.master = master; - this.backupMasters = backupMasters; - this.intransition = rit; - this.clusterId = clusterid; - this.masterCoprocessors = masterCoprocessors; - this.balancerOn = balancerOn; - this.masterInfoPort = masterInfoPort; + @InterfaceAudience.Private + public ClusterStatus(ClusterMetrics metrics) { + this.metrics = metrics; } /** * @return the names of region servers on the dead list */ + @Override public List getDeadServerNames() { - if (deadServers == null) { - return Collections.emptyList(); - } - return Collections.unmodifiableList(deadServers); + return metrics.getDeadServerNames(); + } + + @Override + public Map getLiveServerMetrics() { + return metrics.getLiveServerMetrics(); } /** - * @return the number of region servers in the cluster - */ + * @return the number of region servers in the cluster + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getLiveServerMetrics()}. + */ + @Deprecated public int getServersSize() { - return liveServers != null ? 
liveServers.size() : 0; + return metrics.getLiveServerMetrics().size(); } /** * @return the number of dead region servers in the cluster * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 * (HBASE-13656). - * Use {@link #getDeadServersSize()}. + * Use {@link #getDeadServerNames()}. */ @Deprecated public int getDeadServers() { @@ -163,32 +147,22 @@ public class ClusterStatus { /** * @return the number of dead region servers in the cluster + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getDeadServerNames()}. */ + @Deprecated public int getDeadServersSize() { - return deadServers != null ? deadServers.size() : 0; - } - - - /** - * @return the average cluster load - */ - public double getAverageLoad() { - int load = getRegionsCount(); - int serverSize = getServersSize(); - return serverSize != 0 ? (double)load / (double)serverSize : 0.0; + return metrics.getDeadServerNames().size(); } /** * @return the number of regions deployed on the cluster + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getRegionCount()}. */ + @Deprecated public int getRegionsCount() { - int count = 0; - if (liveServers != null && !liveServers.isEmpty()) { - for (Map.Entry e: this.liveServers.entrySet()) { - count = count + e.getValue().getNumberOfRegions(); - } - } - return count; + return getRegionCount(); } /** @@ -201,20 +175,32 @@ public class ClusterStatus { return (int) getRequestCount(); } - public long getRequestCount() { - if (liveServers == null) { - return 0L; - } - return liveServers.values().stream() - .mapToLong(ServerLoad::getNumberOfRequests) - .sum(); + @Nullable + @Override + public ServerName getMasterName() { + return metrics.getMasterName(); + } + + @Override + public List getBackupMasterNames() { + return metrics.getBackupMasterNames(); + } + + @Override + public List getRegionStatesInTransition() { + return metrics.getRegionStatesInTransition(); } /** * @return the HBase version string as reported by the HMaster */ public String getHBaseVersion() { - return hbaseVersion; + return metrics.getHBaseVersion(); + } + + private Map getLiveServerLoads() { + return metrics.getLiveServerMetrics().entrySet().stream() + .collect(Collectors.toMap(e -> e.getKey(), e -> new ServerLoad(e.getValue()))); } @Override @@ -227,7 +213,7 @@ public class ClusterStatus { } ClusterStatus other = (ClusterStatus) o; return Objects.equal(getHBaseVersion(), other.getHBaseVersion()) && - Objects.equal(this.liveServers, other.liveServers) && + Objects.equal(getLiveServerLoads(), other.getLiveServerLoads()) && getDeadServerNames().containsAll(other.getDeadServerNames()) && Arrays.equals(getMasterCoprocessors(), other.getMasterCoprocessors()) && Objects.equal(getMaster(), other.getMaster()) && @@ -238,8 +224,7 @@ public class ClusterStatus { @Override public int hashCode() { - return Objects.hashCode(hbaseVersion, liveServers, deadServers, master, backupMasters, - clusterId, masterInfoPort); + return metrics.hashCode(); } /** @@ -251,111 +236,123 @@ public class ClusterStatus { return VERSION; } - // - // Getters - // - + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getLiveServerMetrics()} instead. 
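+ * An equivalent sketch via the replacement API ({@code status} here is
+ * just a placeholder name for a ClusterStatus instance, mirroring the
+ * delegation in the method body below):
+ * <pre>
+ * {@code
+ * Collection<ServerName> servers = status.getLiveServerMetrics().keySet();
+ * }
+ * </pre>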
+ */ + @Deprecated public Collection getServers() { - if (liveServers == null) { - return Collections.emptyList(); - } - return Collections.unmodifiableCollection(this.liveServers.keySet()); + return metrics.getLiveServerMetrics().keySet(); } /** * Returns detailed information about the current master {@link ServerName}. * @return current master information if it exists + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getMasterName} instead. */ + @Deprecated public ServerName getMaster() { - return this.master; + return metrics.getMasterName(); } /** * @return the number of backup masters in the cluster + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getBackupMasterNames} instead. */ + @Deprecated public int getBackupMastersSize() { - return backupMasters != null ? backupMasters.size() : 0; + return metrics.getBackupMasterNames().size(); } /** * @return the names of backup masters + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getBackupMasterNames} instead. */ + @Deprecated public List getBackupMasters() { - if (backupMasters == null) { - return Collections.emptyList(); - } - return Collections.unmodifiableList(this.backupMasters); + return metrics.getBackupMasterNames(); } /** * @param sn * @return Server's load or null if not found. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getLiveServerMetrics} instead. */ + @Deprecated public ServerLoad getLoad(final ServerName sn) { - return liveServers != null ? liveServers.get(sn) : null; - } - - @InterfaceAudience.Private - public List getRegionsInTransition() { - if (intransition == null) { - return Collections.emptyList(); - } - return Collections.unmodifiableList(intransition); + ServerMetrics serverMetrics = metrics.getLiveServerMetrics().get(sn); + return serverMetrics == null ? null : new ServerLoad(serverMetrics); } public String getClusterId() { - return clusterId; + return metrics.getClusterId(); } + @Override + public List getMasterCoprocessorNames() { + return metrics.getMasterCoprocessorNames(); + } + + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getMasterCoprocessorNames} instead. + */ + @Deprecated public String[] getMasterCoprocessors() { - return masterCoprocessors; + List rval = metrics.getMasterCoprocessorNames(); + return rval.toArray(new String[rval.size()]); } + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getLastMajorCompactionTimestamp(TableName)} instead. + */ + @Deprecated public long getLastMajorCompactionTsForTable(TableName table) { - long result = Long.MAX_VALUE; - for (ServerName server : getServers()) { - ServerLoad load = getLoad(server); - for (RegionLoad rl : load.getRegionsLoad().values()) { - if (table.equals(HRegionInfo.getTable(rl.getName()))) { - result = Math.min(result, rl.getLastMajorCompactionTs()); - } - } - } - return result == Long.MAX_VALUE ? 0 : result; + return metrics.getLastMajorCompactionTimestamp(table); } + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getLastMajorCompactionTimestamp(byte[])} instead. 
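+ * For example ({@code regionName} being any region name known to this
+ * status; a sketch, not a contract):
+ * <pre>
+ * {@code
+ * long ts = status.getLastMajorCompactionTimestamp(regionName);
+ * }
+ * </pre>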
+ */ + @Deprecated public long getLastMajorCompactionTsForRegion(final byte[] region) { - for (ServerName server : getServers()) { - ServerLoad load = getLoad(server); - RegionLoad rl = load.getRegionsLoad().get(region); - if (rl != null) { - return rl.getLastMajorCompactionTs(); - } - } - return 0; + return metrics.getLastMajorCompactionTimestamp(region); } + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * No flag in 2.0 + */ + @Deprecated public boolean isBalancerOn() { - return balancerOn != null && balancerOn; + return metrics.getBalancerOn() != null && metrics.getBalancerOn(); } + @Override public Boolean getBalancerOn() { - return balancerOn; + return metrics.getBalancerOn(); } + @Override public int getMasterInfoPort() { - return masterInfoPort; + return metrics.getMasterInfoPort(); } @Override public String toString() { StringBuilder sb = new StringBuilder(1024); - sb.append("Master: " + master); + sb.append("Master: " + metrics.getMasterName()); int backupMastersSize = getBackupMastersSize(); sb.append("\nNumber of backup masters: " + backupMastersSize); if (backupMastersSize > 0) { - for (ServerName serverName: backupMasters) { + for (ServerName serverName: metrics.getBackupMasterNames()) { sb.append("\n " + serverName); } } @@ -363,15 +360,15 @@ public class ClusterStatus { int serversSize = getServersSize(); sb.append("\nNumber of live region servers: " + serversSize); if (serversSize > 0) { - for (ServerName serverName: liveServers.keySet()) { + for (ServerName serverName : metrics.getLiveServerMetrics().keySet()) { sb.append("\n " + serverName.getServerName()); } } - int deadServerSize = getDeadServersSize(); + int deadServerSize = metrics.getDeadServerNames().size(); sb.append("\nNumber of dead region servers: " + deadServerSize); if (deadServerSize > 0) { - for (ServerName serverName: deadServers) { + for (ServerName serverName : metrics.getDeadServerNames()) { sb.append("\n " + serverName); } } @@ -380,109 +377,13 @@ public class ClusterStatus { sb.append("\nNumber of requests: " + getRequestCount()); sb.append("\nNumber of regions: " + getRegionsCount()); - int ritSize = (intransition != null) ? intransition.size() : 0; + int ritSize = metrics.getRegionStatesInTransition().size(); sb.append("\nNumber of regions in transition: " + ritSize); if (ritSize > 0) { - for (RegionState state: intransition) { + for (RegionState state: metrics.getRegionStatesInTransition()) { sb.append("\n " + state.toDescriptiveString()); } } return sb.toString(); } - - @InterfaceAudience.Private - public static Builder newBuilder() { - return new Builder(); - } - - /** - * Builder for construct a ClusterStatus. 
- */ - @InterfaceAudience.Private - public static class Builder { - private String hbaseVersion = null; - private Map liveServers = null; - private List deadServers = null; - private ServerName master = null; - private List backupMasters = null; - private List intransition = null; - private String clusterId = null; - private String[] masterCoprocessors = null; - private Boolean balancerOn = null; - private int masterInfoPort = -1; - - private Builder() {} - - public Builder setHBaseVersion(String hbaseVersion) { - this.hbaseVersion = hbaseVersion; - return this; - } - - public Builder setLiveServers(Map liveServers) { - this.liveServers = liveServers; - return this; - } - - public Builder setDeadServers(List deadServers) { - this.deadServers = deadServers; - return this; - } - - public Builder setMaster(ServerName master) { - this.master = master; - return this; - } - - public Builder setBackupMasters(List backupMasters) { - this.backupMasters = backupMasters; - return this; - } - - public Builder setRegionState(List intransition) { - this.intransition = intransition; - return this; - } - - public Builder setClusterId(String clusterId) { - this.clusterId = clusterId; - return this; - } - - public Builder setMasterCoprocessors(String[] masterCoprocessors) { - this.masterCoprocessors = masterCoprocessors; - return this; - } - - public Builder setBalancerOn(Boolean balancerOn) { - this.balancerOn = balancerOn; - return this; - } - - public Builder setMasterInfoPort(int masterInfoPort) { - this.masterInfoPort = masterInfoPort; - return this; - } - - public ClusterStatus build() { - return new ClusterStatus(hbaseVersion, clusterId, liveServers, - deadServers, master, backupMasters, intransition, masterCoprocessors, - balancerOn, masterInfoPort); - } - } - - /** - * Kinds of ClusterStatus - */ - public enum Option { - HBASE_VERSION, /** status about hbase version */ - CLUSTER_ID, /** status about cluster id */ - BALANCER_ON, /** status about balancer is on or not */ - LIVE_SERVERS, /** status about live region servers */ - DEAD_SERVERS, /** status about dead region servers */ - MASTER, /** status about master */ - BACKUP_MASTERS, /** status about backup masters */ - MASTER_COPROCESSORS, /** status about master coprocessors */ - REGIONS_IN_TRANSITION, /** status about regions in transition */ - MASTER_INFO_PORT /** master info port **/ - } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java index a2b993fa787..811f83cb186 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java @@ -21,184 +21,341 @@ package org.apache.hadoop.hbase; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId; -import org.apache.hadoop.hbase.util.Bytes; +import java.util.Map; +import java.util.stream.Collectors; import org.apache.hadoop.hbase.util.Strings; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; /** - * Encapsulates per-region load metrics. - */ + * Encapsulates per-region load metrics. 
+ * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link RegionMetrics} instead. + */ @InterfaceAudience.Public -public class RegionLoad { - +@Deprecated +public class RegionLoad implements RegionMetrics { + // DONT use this pb object since the byte array backed may be modified in rpc layer + // we keep this pb object for BC. protected ClusterStatusProtos.RegionLoad regionLoadPB; + private final RegionMetrics metrics; - @InterfaceAudience.Private + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") public RegionLoad(ClusterStatusProtos.RegionLoad regionLoadPB) { this.regionLoadPB = regionLoadPB; + this.metrics = RegionMetricsBuilder.toRegionMetrics(regionLoadPB); + } + + RegionLoad(RegionMetrics metrics) { + this.metrics = metrics; + this.regionLoadPB = RegionMetricsBuilder.toRegionLoad(metrics); } /** * @return the region name + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getRegionName} instead. */ + @Deprecated public byte[] getName() { - return regionLoadPB.getRegionSpecifier().getValue().toByteArray(); + return metrics.getRegionName(); } - /** - * @return the region name as a string - */ - public String getNameAsString() { - return Bytes.toStringBinary(getName()); + @Override + public byte[] getRegionName() { + return metrics.getRegionName(); + } + + @Override + public int getStoreCount() { + return metrics.getStoreCount(); + } + + @Override + public int getStoreFileCount() { + return metrics.getStoreFileCount(); + } + + @Override + public Size getStoreFileSize() { + return metrics.getStoreFileSize(); + } + + @Override + public Size getMemStoreSize() { + return metrics.getMemStoreSize(); + } + + @Override + public long getReadRequestCount() { + return metrics.getReadRequestCount(); + } + + @Override + public long getFilteredReadRequestCount() { + return metrics.getFilteredReadRequestCount(); + } + + @Override + public Size getStoreFileIndexSize() { + return metrics.getStoreFileIndexSize(); + } + + @Override + public long getWriteRequestCount() { + return metrics.getWriteRequestCount(); + } + + @Override + public Size getStoreFileRootLevelIndexSize() { + return metrics.getStoreFileRootLevelIndexSize(); + } + + @Override + public Size getStoreFileUncompressedDataIndexSize() { + return metrics.getStoreFileUncompressedDataIndexSize(); + } + + @Override + public Size getBloomFilterSize() { + return metrics.getBloomFilterSize(); + } + + @Override + public long getCompactingCellCount() { + return metrics.getCompactingCellCount(); + } + + @Override + public long getCompactedCellCount() { + return metrics.getCompactedCellCount(); + } + + @Override + public long getCompletedSequenceId() { + return metrics.getCompletedSequenceId(); + } + + @Override + public Map getStoreSequenceId() { + return metrics.getStoreSequenceId(); + } + + @Override + public Size getUncompressedStoreFileSize() { + return metrics.getUncompressedStoreFileSize(); } /** * @return the number of stores + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getStoreCount} instead. */ + @Deprecated public int getStores() { - return regionLoadPB.getStores(); + return metrics.getStoreCount(); } /** * @return the number of storefiles + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getStoreFileCount} instead. 
*/ + @Deprecated public int getStorefiles() { - return regionLoadPB.getStorefiles(); + return metrics.getStoreFileCount(); } /** * @return the total size of the storefiles, in MB + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getStoreFileSize} instead. */ + @Deprecated public int getStorefileSizeMB() { - return regionLoadPB.getStorefileSizeMB(); + return (int) metrics.getStoreFileSize().get(Size.Unit.MEGABYTE); } /** * @return the memstore size, in MB + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getMemStoreSize} instead. */ + @Deprecated public int getMemStoreSizeMB() { - return regionLoadPB.getMemStoreSizeMB(); + return (int) metrics.getMemStoreSize().get(Size.Unit.MEGABYTE); } /** * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 * ((HBASE-3935)). - * Use {@link #getStorefileIndexSizeKB()} instead. + * Use {@link #getStoreFileRootLevelIndexSize} instead. */ @Deprecated public int getStorefileIndexSizeMB() { // Return value divided by 1024 - return (int) (regionLoadPB.getStorefileIndexSizeKB() >> 10); + return (getRootIndexSizeKB() >> 10); } - public long getStorefileIndexSizeKB() { - return regionLoadPB.getStorefileIndexSizeKB(); + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getStoreFileRootLevelIndexSize()} instead. + */ + @Deprecated + public int getStorefileIndexSizeKB() { + return getRootIndexSizeKB(); } /** * @return the number of requests made to region + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getRequestCount()} instead. */ + @Deprecated public long getRequestsCount() { - return getReadRequestsCount() + getWriteRequestsCount(); + return metrics.getRequestCount(); } /** * @return the number of read requests made to region + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getReadRequestCount} instead. */ + @Deprecated public long getReadRequestsCount() { - return regionLoadPB.getReadRequestsCount(); + return metrics.getReadRequestCount(); } /** * @return the number of filtered read requests made to region + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getFilteredReadRequestCount} instead. */ + @Deprecated public long getFilteredReadRequestsCount() { - return regionLoadPB.getFilteredReadRequestsCount(); + return metrics.getFilteredReadRequestCount(); } /** * @return the number of write requests made to region + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getWriteRequestCount} instead. */ + @Deprecated public long getWriteRequestsCount() { - return regionLoadPB.getWriteRequestsCount(); + return metrics.getWriteRequestCount(); } /** * @return The current total size of root-level indexes for the region, in KB. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getStoreFileRootLevelIndexSize} instead. */ + @Deprecated public int getRootIndexSizeKB() { - return regionLoadPB.getRootIndexSizeKB(); + return (int) metrics.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE); } /** * @return The total size of all index blocks, not just the root level, in KB. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getStoreFileUncompressedDataIndexSize} instead. 
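+ * A sketch of recovering the old KB figure from the {@link Size}-based
+ * replacement (this mirrors the delegation in the method body below):
+ * <pre>
+ * {@code
+ * int kb = (int) getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE);
+ * }
+ * </pre>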
*/ + @Deprecated public int getTotalStaticIndexSizeKB() { - return regionLoadPB.getTotalStaticIndexSizeKB(); + return (int) metrics.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE); } /** * @return The total size of all Bloom filter blocks, not just loaded into the * block cache, in KB. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getBloomFilterSize} instead. */ + @Deprecated public int getTotalStaticBloomSizeKB() { - return regionLoadPB.getTotalStaticBloomSizeKB(); + return (int) metrics.getBloomFilterSize().get(Size.Unit.KILOBYTE); } /** * @return the total number of kvs in current compaction + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getCompactingCellCount} instead. */ + @Deprecated public long getTotalCompactingKVs() { - return regionLoadPB.getTotalCompactingKVs(); + return metrics.getCompactingCellCount(); } /** * @return the number of already compacted kvs in current compaction + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getCompactedCellCount} instead. */ + @Deprecated public long getCurrentCompactedKVs() { - return regionLoadPB.getCurrentCompactedKVs(); + return metrics.getCompactedCellCount(); } /** * This does not really belong inside RegionLoad but its being done in the name of expediency. * @return the completed sequence Id for the region + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getCompletedSequenceId} instead. */ + @Deprecated public long getCompleteSequenceId() { - return regionLoadPB.getCompleteSequenceId(); + return metrics.getCompletedSequenceId(); } /** * @return completed sequence id per store. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getStoreSequenceId} instead. */ - public List getStoreCompleteSequenceId() { - return regionLoadPB.getStoreCompleteSequenceIdList(); + @Deprecated + public List getStoreCompleteSequenceId() { + return metrics.getStoreSequenceId().entrySet().stream() + .map(s -> ClusterStatusProtos.StoreSequenceId.newBuilder() + .setFamilyName(UnsafeByteOperations.unsafeWrap(s.getKey())) + .setSequenceId(s.getValue()) + .build()) + .collect(Collectors.toList()); } /** * @return the uncompressed size of the storefiles in MB. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getUncompressedStoreFileSize} instead. */ + @Deprecated public int getStoreUncompressedSizeMB() { - return regionLoadPB.getStoreUncompressedSizeMB(); + return (int) metrics.getUncompressedStoreFileSize().get(Size.Unit.KILOBYTE); } /** * @return the data locality of region in the regionserver. */ + @Override public float getDataLocality() { - if (regionLoadPB.hasDataLocality()) { - return regionLoadPB.getDataLocality(); - } - return 0.0f; + return metrics.getDataLocality(); + } + + @Override + public long getLastMajorCompactionTimestamp() { + return metrics.getLastMajorCompactionTimestamp(); } /** * @return the timestamp of the oldest hfile for any store of this region. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getLastMajorCompactionTimestamp} instead. 
*/ + @Deprecated public long getLastMajorCompactionTs() { - return regionLoadPB.getLastMajorCompactionTs(); + return metrics.getLastMajorCompactionTimestamp(); } /** @@ -208,47 +365,43 @@ public class RegionLoad { public String toString() { StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "numberOfStores", this.getStores()); - sb = Strings.appendKeyValue(sb, "numberOfStorefiles", - this.getStorefiles()); - sb = Strings.appendKeyValue(sb, "storefileUncompressedSizeMB", - this.getStoreUncompressedSizeMB()); - sb = Strings.appendKeyValue(sb, "lastMajorCompactionTimestamp", - this.getLastMajorCompactionTs()); - sb = Strings.appendKeyValue(sb, "storefileSizeMB", - this.getStorefileSizeMB()); + Strings.appendKeyValue(sb, "numberOfStorefiles", this.getStorefiles()); + Strings.appendKeyValue(sb, "storefileUncompressedSizeMB", + this.getStoreUncompressedSizeMB()); + Strings.appendKeyValue(sb, "lastMajorCompactionTimestamp", + this.getLastMajorCompactionTs()); + Strings.appendKeyValue(sb, "storefileSizeMB", this.getStorefileSizeMB()); if (this.getStoreUncompressedSizeMB() != 0) { - sb = Strings.appendKeyValue(sb, "compressionRatio", + Strings.appendKeyValue(sb, "compressionRatio", String.format("%.4f", (float) this.getStorefileSizeMB() / (float) this.getStoreUncompressedSizeMB())); } - sb = Strings.appendKeyValue(sb, "memstoreSizeMB", + Strings.appendKeyValue(sb, "memstoreSizeMB", this.getMemStoreSizeMB()); - sb = Strings.appendKeyValue(sb, "storefileIndexSizeKB", - this.getStorefileIndexSizeKB()); - sb = Strings.appendKeyValue(sb, "readRequestsCount", + Strings.appendKeyValue(sb, "readRequestsCount", this.getReadRequestsCount()); - sb = Strings.appendKeyValue(sb, "writeRequestsCount", + Strings.appendKeyValue(sb, "writeRequestsCount", this.getWriteRequestsCount()); - sb = Strings.appendKeyValue(sb, "rootIndexSizeKB", + Strings.appendKeyValue(sb, "rootIndexSizeKB", this.getRootIndexSizeKB()); - sb = Strings.appendKeyValue(sb, "totalStaticIndexSizeKB", + Strings.appendKeyValue(sb, "totalStaticIndexSizeKB", this.getTotalStaticIndexSizeKB()); - sb = Strings.appendKeyValue(sb, "totalStaticBloomSizeKB", + Strings.appendKeyValue(sb, "totalStaticBloomSizeKB", this.getTotalStaticBloomSizeKB()); - sb = Strings.appendKeyValue(sb, "totalCompactingKVs", + Strings.appendKeyValue(sb, "totalCompactingKVs", this.getTotalCompactingKVs()); - sb = Strings.appendKeyValue(sb, "currentCompactedKVs", + Strings.appendKeyValue(sb, "currentCompactedKVs", this.getCurrentCompactedKVs()); float compactionProgressPct = Float.NaN; if (this.getTotalCompactingKVs() > 0) { compactionProgressPct = ((float) this.getCurrentCompactedKVs() / (float) this.getTotalCompactingKVs()); } - sb = Strings.appendKeyValue(sb, "compactionProgressPct", + Strings.appendKeyValue(sb, "compactionProgressPct", compactionProgressPct); - sb = Strings.appendKeyValue(sb, "completeSequenceId", + Strings.appendKeyValue(sb, "completeSequenceId", this.getCompleteSequenceId()); - sb = Strings.appendKeyValue(sb, "dataLocality", + Strings.appendKeyValue(sb, "dataLocality", this.getDataLocality()); return sb.toString(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java new file mode 100644 index 00000000000..e73683f4833 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java @@ -0,0 +1,147 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or 
more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import java.util.Map; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Encapsulates per-region load metrics. + */ +@InterfaceAudience.Public +public interface RegionMetrics { + + /** + * @return the region name + */ + byte[] getRegionName(); + + /** + * @return the number of stores + */ + int getStoreCount(); + + /** + * @return the number of storefiles + */ + int getStoreFileCount(); + + /** + * @return the total size of the storefiles + */ + Size getStoreFileSize(); + + /** + * @return the memstore size + */ + Size getMemStoreSize(); + + /** + * @return the number of read requests made to region + */ + long getReadRequestCount(); + + /** + * @return the number of write requests made to region + */ + long getWriteRequestCount(); + + /** + * @return the number of write requests and read requests made to region + */ + default long getRequestCount() { + return getReadRequestCount() + getWriteRequestCount(); + } + + /** + * @return the region name as a string + */ + default String getNameAsString() { + return Bytes.toStringBinary(getRegionName()); + } + + /** + * @return the number of filtered read requests made to region + */ + long getFilteredReadRequestCount(); + + /** + * TODO: why we pass the same value to different counters? Currently, the value from + * getStoreFileIndexSize() is same with getStoreFileRootLevelIndexSize() + * see HRegionServer#createRegionLoad. + * @return The current total size of root-level indexes for the region + */ + Size getStoreFileIndexSize(); + + /** + * @return The current total size of root-level indexes for the region + */ + Size getStoreFileRootLevelIndexSize(); + + /** + * @return The total size of all index blocks, not just the root level + */ + Size getStoreFileUncompressedDataIndexSize(); + + /** + * @return The total size of all Bloom filter blocks, not just loaded into the block cache + */ + Size getBloomFilterSize(); + + /** + * @return the total number of cells in current compaction + */ + long getCompactingCellCount(); + + /** + * @return the number of already compacted kvs in current compaction + */ + long getCompactedCellCount(); + + /** + * This does not really belong inside RegionLoad but its being done in the name of expediency. + * @return the completed sequence Id for the region + */ + long getCompletedSequenceId(); + + /** + * @return completed sequence id per store. + */ + Map getStoreSequenceId(); + + + /** + * @return the uncompressed size of the storefiles + */ + Size getUncompressedStoreFileSize(); + + /** + * @return the data locality of region in the regionserver. + */ + float getDataLocality(); + + /** + * @return the timestamp of the oldest hfile for any store of this region. 
+ */ + long getLastMajorCompactionTimestamp(); + +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java new file mode 100644 index 00000000000..0fb6d3d6499 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java @@ -0,0 +1,437 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import org.apache.hadoop.hbase.util.Strings; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions; +import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; + +@InterfaceAudience.Private +public final class RegionMetricsBuilder { + + public static RegionMetrics toRegionMetrics(ClusterStatusProtos.RegionLoad regionLoadPB) { + return RegionMetricsBuilder + .newBuilder(regionLoadPB.getRegionSpecifier().getValue().toByteArray()) + .setBloomFilterSize(new Size(regionLoadPB.getTotalStaticBloomSizeKB(), Size.Unit.KILOBYTE)) + .setCompactedCellCount(regionLoadPB.getCurrentCompactedKVs()) + .setCompactingCellCount(regionLoadPB.getTotalCompactingKVs()) + .setCompletedSequenceId(regionLoadPB.getCompleteSequenceId()) + .setDataLocality(regionLoadPB.hasDataLocality() ? 
regionLoadPB.getDataLocality() : 0.0f) + .setFilteredReadRequestCount(regionLoadPB.getFilteredReadRequestsCount()) + .setStoreFileUncompressedDataIndexSize(new Size(regionLoadPB.getTotalStaticIndexSizeKB(), + Size.Unit.KILOBYTE)) + .setLastMajorCompactionTimestamp(regionLoadPB.getLastMajorCompactionTs()) + .setMemStoreSize(new Size(regionLoadPB.getMemStoreSizeMB(), Size.Unit.MEGABYTE)) + .setReadRequestCount(regionLoadPB.getReadRequestsCount()) + .setWriteRequestCount(regionLoadPB.getWriteRequestsCount()) + .setStoreFileIndexSize(new Size(regionLoadPB.getStorefileIndexSizeKB(), + Size.Unit.KILOBYTE)) + .setStoreFileRootLevelIndexSize(new Size(regionLoadPB.getRootIndexSizeKB(), + Size.Unit.KILOBYTE)) + .setStoreCount(regionLoadPB.getStores()) + .setStoreFileCount(regionLoadPB.getStorefiles()) + .setStoreFileSize(new Size(regionLoadPB.getStorefileSizeMB(), Size.Unit.MEGABYTE)) + .setStoreSequenceIds(regionLoadPB.getStoreCompleteSequenceIdList().stream() + .collect(Collectors.toMap( + (ClusterStatusProtos.StoreSequenceId s) -> s.getFamilyName().toByteArray(), + ClusterStatusProtos.StoreSequenceId::getSequenceId))) + .setUncompressedStoreFileSize( + new Size(regionLoadPB.getStoreUncompressedSizeMB(),Size.Unit.MEGABYTE)) + .build(); + } + + private static List toStoreSequenceId( + Map ids) { + return ids.entrySet().stream() + .map(e -> ClusterStatusProtos.StoreSequenceId.newBuilder() + .setFamilyName(UnsafeByteOperations.unsafeWrap(e.getKey())) + .setSequenceId(e.getValue()) + .build()) + .collect(Collectors.toList()); + } + + public static ClusterStatusProtos.RegionLoad toRegionLoad(RegionMetrics regionMetrics) { + return ClusterStatusProtos.RegionLoad.newBuilder() + .setRegionSpecifier(HBaseProtos.RegionSpecifier + .newBuilder().setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) + .setValue(UnsafeByteOperations.unsafeWrap(regionMetrics.getRegionName())) + .build()) + .setTotalStaticBloomSizeKB((int) regionMetrics.getBloomFilterSize() + .get(Size.Unit.KILOBYTE)) + .setCurrentCompactedKVs(regionMetrics.getCompactedCellCount()) + .setTotalCompactingKVs(regionMetrics.getCompactingCellCount()) + .setCompleteSequenceId(regionMetrics.getCompletedSequenceId()) + .setDataLocality(regionMetrics.getDataLocality()) + .setFilteredReadRequestsCount(regionMetrics.getFilteredReadRequestCount()) + .setTotalStaticIndexSizeKB((int) regionMetrics.getStoreFileUncompressedDataIndexSize() + .get(Size.Unit.KILOBYTE)) + .setLastMajorCompactionTs(regionMetrics.getLastMajorCompactionTimestamp()) + .setMemStoreSizeMB((int) regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE)) + .setReadRequestsCount(regionMetrics.getReadRequestCount()) + .setWriteRequestsCount(regionMetrics.getWriteRequestCount()) + .setStorefileIndexSizeKB((long) regionMetrics.getStoreFileIndexSize() + .get(Size.Unit.KILOBYTE)) + .setRootIndexSizeKB((int) regionMetrics.getStoreFileRootLevelIndexSize() + .get(Size.Unit.KILOBYTE)) + .setStores(regionMetrics.getStoreCount()) + .setStorefiles(regionMetrics.getStoreCount()) + .setStorefileSizeMB((int) regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE)) + .addAllStoreCompleteSequenceId(toStoreSequenceId(regionMetrics.getStoreSequenceId())) + .setStoreUncompressedSizeMB( + (int) regionMetrics.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE)) + .build(); + } + + public static RegionMetricsBuilder newBuilder(byte[] name) { + return new RegionMetricsBuilder(name); + } + + private final byte[] name; + private int storeCount; + private int storeFileCount; + private long 
compactingCellCount; + private long compactedCellCount; + private Size storeFileSize = Size.ZERO; + private Size memStoreSize = Size.ZERO; + private Size indexSize = Size.ZERO; + private Size rootLevelIndexSize = Size.ZERO; + private Size uncompressedDataIndexSize = Size.ZERO; + private Size bloomFilterSize = Size.ZERO; + private Size uncompressedStoreFileSize = Size.ZERO; + private long writeRequestCount; + private long readRequestCount; + private long filteredReadRequestCount; + private long completedSequenceId; + private Map storeSequenceIds = Collections.emptyMap(); + private float dataLocality; + private long lastMajorCompactionTimestamp; + private RegionMetricsBuilder(byte[] name) { + this.name = name; + } + + public RegionMetricsBuilder setStoreCount(int value) { + this.storeCount = value; + return this; + } + public RegionMetricsBuilder setStoreFileCount(int value) { + this.storeFileCount = value; + return this; + } + public RegionMetricsBuilder setCompactingCellCount(long value) { + this.compactingCellCount = value; + return this; + } + public RegionMetricsBuilder setCompactedCellCount(long value) { + this.compactedCellCount = value; + return this; + } + public RegionMetricsBuilder setStoreFileSize(Size value) { + this.storeFileSize = value; + return this; + } + public RegionMetricsBuilder setMemStoreSize(Size value) { + this.memStoreSize = value; + return this; + } + public RegionMetricsBuilder setStoreFileIndexSize(Size value) { + this.indexSize = value; + return this; + } + public RegionMetricsBuilder setStoreFileRootLevelIndexSize(Size value) { + this.rootLevelIndexSize = value; + return this; + } + public RegionMetricsBuilder setStoreFileUncompressedDataIndexSize(Size value) { + this.uncompressedDataIndexSize = value; + return this; + } + public RegionMetricsBuilder setBloomFilterSize(Size value) { + this.bloomFilterSize = value; + return this; + } + public RegionMetricsBuilder setUncompressedStoreFileSize(Size value) { + this.uncompressedStoreFileSize = value; + return this; + } + public RegionMetricsBuilder setWriteRequestCount(long value) { + this.writeRequestCount = value; + return this; + } + public RegionMetricsBuilder setReadRequestCount(long value) { + this.readRequestCount = value; + return this; + } + public RegionMetricsBuilder setFilteredReadRequestCount(long value) { + this.filteredReadRequestCount = value; + return this; + } + public RegionMetricsBuilder setCompletedSequenceId(long value) { + this.completedSequenceId = value; + return this; + } + public RegionMetricsBuilder setStoreSequenceIds(Map value) { + this.storeSequenceIds = value; + return this; + } + public RegionMetricsBuilder setDataLocality(float value) { + this.dataLocality = value; + return this; + } + public RegionMetricsBuilder setLastMajorCompactionTimestamp(long value) { + this.lastMajorCompactionTimestamp = value; + return this; + } + + public RegionMetrics build() { + return new RegionMetricsImpl(name, + storeCount, + storeFileCount, + compactingCellCount, + compactedCellCount, + storeFileSize, + memStoreSize, + indexSize, + rootLevelIndexSize, + uncompressedDataIndexSize, + bloomFilterSize, + uncompressedStoreFileSize, + writeRequestCount, + readRequestCount, + filteredReadRequestCount, + completedSequenceId, + storeSequenceIds, + dataLocality, + lastMajorCompactionTimestamp); + } + + private static class RegionMetricsImpl implements RegionMetrics { + private final byte[] name; + private final int storeCount; + private final int storeFileCount; + private final long compactingCellCount; + 
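For orientation while reading the builder hunks above: a minimal sketch of driving the fluent API this file adds. The region name and every metric value below are made-up placeholders, not values taken from the patch, and org.apache.hadoop.hbase.util.Bytes is assumed on the classpath; any field left unset keeps its default (zero counts, Size.ZERO).

```java
// Hypothetical region name; RegionMetricsBuilder only requires the raw bytes.
byte[] regionName = Bytes.toBytes("t1,,1500000000000.0123456789abcdef0123456789abcdef.");
RegionMetrics rm = RegionMetricsBuilder.newBuilder(regionName)
    .setStoreCount(3)
    .setStoreFileCount(7)
    .setStoreFileSize(new Size(128, Size.Unit.MEGABYTE))
    .setMemStoreSize(new Size(16, Size.Unit.MEGABYTE))
    .setReadRequestCount(1024L)
    .setWriteRequestCount(512L)
    .setDataLocality(0.95f)
    .build();
```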
private final long compactedCellCount; + private final Size storeFileSize; + private final Size memStoreSize; + private final Size indexSize; + private final Size rootLevelIndexSize; + private final Size uncompressedDataIndexSize; + private final Size bloomFilterSize; + private final Size uncompressedStoreFileSize; + private final long writeRequestCount; + private final long readRequestCount; + private final long filteredReadRequestCount; + private final long completedSequenceId; + private final Map storeSequenceIds; + private final float dataLocality; + private final long lastMajorCompactionTimestamp; + RegionMetricsImpl(byte[] name, + int storeCount, + int storeFileCount, + final long compactingCellCount, + long compactedCellCount, + Size storeFileSize, + Size memStoreSize, + Size indexSize, + Size rootLevelIndexSize, + Size uncompressedDataIndexSize, + Size bloomFilterSize, + Size uncompressedStoreFileSize, + long writeRequestCount, + long readRequestCount, + long filteredReadRequestCount, + long completedSequenceId, + Map storeSequenceIds, + float dataLocality, + long lastMajorCompactionTimestamp) { + this.name = Preconditions.checkNotNull(name); + this.storeCount = storeCount; + this.storeFileCount = storeFileCount; + this.compactingCellCount = compactingCellCount; + this.compactedCellCount = compactedCellCount; + this.storeFileSize = Preconditions.checkNotNull(storeFileSize); + this.memStoreSize = Preconditions.checkNotNull(memStoreSize); + this.indexSize = Preconditions.checkNotNull(indexSize); + this.rootLevelIndexSize = Preconditions.checkNotNull(rootLevelIndexSize); + this.uncompressedDataIndexSize = Preconditions.checkNotNull(uncompressedDataIndexSize); + this.bloomFilterSize = Preconditions.checkNotNull(bloomFilterSize); + this.uncompressedStoreFileSize = Preconditions.checkNotNull(uncompressedStoreFileSize); + this.writeRequestCount = writeRequestCount; + this.readRequestCount = readRequestCount; + this.filteredReadRequestCount = filteredReadRequestCount; + this.completedSequenceId = completedSequenceId; + this.storeSequenceIds = Preconditions.checkNotNull(storeSequenceIds); + this.dataLocality = dataLocality; + this.lastMajorCompactionTimestamp = lastMajorCompactionTimestamp; + } + + @Override + public byte[] getRegionName() { + return name; + } + + @Override + public int getStoreCount() { + return storeCount; + } + + @Override + public int getStoreFileCount() { + return storeFileCount; + } + + @Override + public Size getStoreFileSize() { + return storeFileSize; + } + + @Override + public Size getMemStoreSize() { + return memStoreSize; + } + + @Override + public long getReadRequestCount() { + return readRequestCount; + } + + @Override + public long getFilteredReadRequestCount() { + return filteredReadRequestCount; + } + + @Override + public long getWriteRequestCount() { + return writeRequestCount; + } + + @Override + public Size getStoreFileIndexSize() { + return indexSize; + } + + @Override + public Size getStoreFileRootLevelIndexSize() { + return rootLevelIndexSize; + } + + @Override + public Size getStoreFileUncompressedDataIndexSize() { + return uncompressedDataIndexSize; + } + + @Override + public Size getBloomFilterSize() { + return bloomFilterSize; + } + + @Override + public long getCompactingCellCount() { + return compactingCellCount; + } + + @Override + public long getCompactedCellCount() { + return compactedCellCount; + } + + @Override + public long getCompletedSequenceId() { + return completedSequenceId; + } + + @Override + public Map getStoreSequenceId() { + 
return Collections.unmodifiableMap(storeSequenceIds); + } + + @Override + public Size getUncompressedStoreFileSize() { + return uncompressedStoreFileSize; + } + + @Override + public float getDataLocality() { + return dataLocality; + } + + @Override + public long getLastMajorCompactionTimestamp() { + return lastMajorCompactionTimestamp; + } + + @Override + public String toString() { + StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "storeCount", + this.getStoreCount()); + Strings.appendKeyValue(sb, "storeFileCount", + this.getStoreFileCount()); + Strings.appendKeyValue(sb, "uncompressedStoreFileSize", + this.getUncompressedStoreFileSize()); + Strings.appendKeyValue(sb, "lastMajorCompactionTimestamp", + this.getLastMajorCompactionTimestamp()); + Strings.appendKeyValue(sb, "storeFileSize", + this.getStoreFileSize()); + if (this.getUncompressedStoreFileSize().get() != 0) { + Strings.appendKeyValue(sb, "compressionRatio", + String.format("%.4f", + (float) this.getStoreFileSize().get(Size.Unit.MEGABYTE) / + (float) this.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE))); + } + Strings.appendKeyValue(sb, "memStoreSize", + this.getMemStoreSize()); + Strings.appendKeyValue(sb, "readRequestCount", + this.getReadRequestCount()); + Strings.appendKeyValue(sb, "writeRequestCount", + this.getWriteRequestCount()); + Strings.appendKeyValue(sb, "rootLevelIndexSize", + this.getStoreFileRootLevelIndexSize()); + Strings.appendKeyValue(sb, "uncompressedDataIndexSize", + this.getStoreFileUncompressedDataIndexSize()); + Strings.appendKeyValue(sb, "bloomFilterSize", + this.getBloomFilterSize()); + Strings.appendKeyValue(sb, "compactingCellCount", + this.getCompactingCellCount()); + Strings.appendKeyValue(sb, "compactedCellCount", + this.getCompactedCellCount()); + float compactionProgressPct = Float.NaN; + if (this.getCompactingCellCount() > 0) { + compactionProgressPct = ((float) this.getCompactedCellCount() / + (float) this.getCompactingCellCount()); + } + Strings.appendKeyValue(sb, "compactionProgressPct", + compactionProgressPct); + Strings.appendKeyValue(sb, "completedSequenceId", + this.getCompletedSequenceId()); + Strings.appendKeyValue(sb, "dataLocality", + this.getDataLocality()); + return sb.toString(); + } + } + +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java index 2fdb7319851..2a56e57cef2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java @@ -24,23 +24,26 @@ import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.TreeMap; -import java.util.TreeSet; - -import org.apache.hadoop.hbase.shaded.com.google.common.base.Objects; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.Coprocessor; +import java.util.stream.Collectors; import org.apache.hadoop.hbase.replication.ReplicationLoadSink; import org.apache.hadoop.hbase.replication.ReplicationLoadSource; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Strings; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hadoop.hbase.shaded.com.google.common.base.Objects; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; /** * 
This class is used for exporting current state of load on a RegionServer. + * + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link ServerMetrics} instead. */ @InterfaceAudience.Public -public class ServerLoad { +@Deprecated +public class ServerLoad implements ServerMetrics { + private final ServerMetrics metrics; private int stores = 0; private int storefiles = 0; private int storeUncompressedSizeMB = 0; @@ -55,113 +58,200 @@ public class ServerLoad { private int totalStaticBloomSizeKB = 0; private long totalCompactingKVs = 0; private long currentCompactedKVs = 0; - private long reportTime = 0; + /** + * DON'T use this constructor. It makes up a fake server name. + */ @InterfaceAudience.Private public ServerLoad(ClusterStatusProtos.ServerLoad serverLoad) { + this(ServerName.valueOf("localhost,1,1"), serverLoad); + } + + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") + @InterfaceAudience.Private + public ServerLoad(ServerName name, ClusterStatusProtos.ServerLoad serverLoad) { + this(ServerMetricsBuilder.toServerMetrics(name, serverLoad)); this.serverLoad = serverLoad; - this.reportTime = System.currentTimeMillis(); - for (ClusterStatusProtos.RegionLoad rl: serverLoad.getRegionLoadsList()) { - stores += rl.getStores(); - storefiles += rl.getStorefiles(); - storeUncompressedSizeMB += rl.getStoreUncompressedSizeMB(); - storefileSizeMB += rl.getStorefileSizeMB(); - memstoreSizeMB += rl.getMemStoreSizeMB(); - storefileIndexSizeKB += rl.getStorefileIndexSizeKB(); - readRequestsCount += rl.getReadRequestsCount(); - filteredReadRequestsCount += rl.getFilteredReadRequestsCount(); - writeRequestsCount += rl.getWriteRequestsCount(); - rootIndexSizeKB += rl.getRootIndexSizeKB(); - totalStaticIndexSizeKB += rl.getTotalStaticIndexSizeKB(); - totalStaticBloomSizeKB += rl.getTotalStaticBloomSizeKB(); - totalCompactingKVs += rl.getTotalCompactingKVs(); - currentCompactedKVs += rl.getCurrentCompactedKVs(); + } + + @InterfaceAudience.Private + public ServerLoad(ServerMetrics metrics) { + this.metrics = metrics; + this.serverLoad = ServerMetricsBuilder.toServerLoad(metrics); + for (RegionMetrics rl : metrics.getRegionMetrics().values()) { + stores += rl.getStoreCount(); + storefiles += rl.getStoreFileCount(); + storeUncompressedSizeMB += rl.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE); + storefileSizeMB += rl.getStoreFileSize().get(Size.Unit.MEGABYTE); + memstoreSizeMB += rl.getMemStoreSize().get(Size.Unit.MEGABYTE); + readRequestsCount += rl.getReadRequestCount(); + filteredReadRequestsCount += rl.getFilteredReadRequestCount(); + writeRequestsCount += rl.getWriteRequestCount(); + storefileIndexSizeKB += rl.getStoreFileIndexSize().get(Size.Unit.KILOBYTE); + rootIndexSizeKB += rl.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE); + totalStaticIndexSizeKB += rl.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE); + totalStaticBloomSizeKB += rl.getBloomFilterSize().get(Size.Unit.KILOBYTE); + totalCompactingKVs += rl.getCompactingCellCount(); + currentCompactedKVs += rl.getCompactedCellCount(); } } - // NOTE: Function name cannot start with "get" because then an OpenDataException is thrown because - // HBaseProtos.ServerLoad cannot be converted to an open data type(see HBASE-5967).
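Reviewer note on the hunk above: the reworked constructors turn ServerLoad into a thin adapter over the new ServerMetrics, so legacy call sites keep compiling. A hedged sketch of both access styles; the server name and the serverLoadPB variable are illustrative, not from the patch:

```java
// serverLoadPB: a ClusterStatusProtos.ServerLoad received from a region server.
ServerLoad legacy = new ServerLoad(ServerName.valueOf("host1,16020,1500000000000"), serverLoadPB);
int stores = legacy.getStores();          // old aggregate, now summed from RegionMetrics
Size usedHeap = legacy.getUsedHeapSize(); // new accessor via the ServerMetrics interface
```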
- /* @return the underlying ServerLoad protobuf object */ + /** + * NOTE: Function name cannot start with "get" because then an OpenDataException is thrown, as + * HBaseProtos.ServerLoad cannot be converted to an open data type (see HBASE-5967). + * @return the underlying ServerLoad protobuf object + * @deprecated Don't use this PB object, since the backing byte array may be modified in the RPC layer + */ @InterfaceAudience.Private + @Deprecated public ClusterStatusProtos.ServerLoad obtainServerLoadPB() { return serverLoad; } protected ClusterStatusProtos.ServerLoad serverLoad; - /* @return number of requests since last report. */ + /** + * @return number of requests since last report. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. + * Use {@link #getRequestCountPerSecond} instead. + */ + @Deprecated public long getNumberOfRequests() { - return serverLoad.getNumberOfRequests(); + return getRequestCountPerSecond(); } + + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. + * There is no flag in 2.0; this always returns true. + */ + @Deprecated public boolean hasNumberOfRequests() { - return serverLoad.hasNumberOfRequests(); + return true; } - /* @return total number of requests from the start of the region server. */ + /** + * @return total number of requests from the start of the region server. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. + * Use {@link #getRequestCount} instead. + */ + @Deprecated public long getTotalNumberOfRequests() { - return serverLoad.getTotalNumberOfRequests(); + return getRequestCount(); } + + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. + * There is no flag in 2.0; this always returns true. + */ + @Deprecated public boolean hasTotalNumberOfRequests() { - return serverLoad.hasTotalNumberOfRequests(); + return true; } - /* @return the amount of used heap, in MB. */ + /** + * @return the amount of used heap, in MB. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. + * Use {@link #getUsedHeapSize} instead. + */ + @Deprecated public int getUsedHeapMB() { - return serverLoad.getUsedHeapMB(); + return (int) getUsedHeapSize().get(Size.Unit.MEGABYTE); } + + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. + * There is no flag in 2.0; this always returns true. + */ + @Deprecated public boolean hasUsedHeapMB() { - return serverLoad.hasUsedHeapMB(); + return true; } - /* @return the maximum allowable size of the heap, in MB. */ + /** + * @return the maximum allowable size of the heap, in MB. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getMaxHeapSize} instead. + */ + @Deprecated public int getMaxHeapMB() { - return serverLoad.getMaxHeapMB(); - } - public boolean hasMaxHeapMB() { - return serverLoad.hasMaxHeapMB(); + return (int) getMaxHeapSize().get(Size.Unit.MEGABYTE); } + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. + * There is no flag in 2.0; this always returns true. + */ + @Deprecated + public boolean hasMaxHeapMB() { + return true; + } + + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getRegionMetrics} instead. + */ + @Deprecated public int getStores() { return stores; } + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. + * Use {@link #getRegionMetrics} instead. + */ + @Deprecated public int getStorefiles() { return storefiles; } + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getRegionMetrics} instead.
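Each deprecated accessor above names its replacement; a sketch of the migration for the request and heap getters, using only methods introduced in this patch (the printLoad method name and the printing are illustrative):

```java
static void printLoad(ServerMetrics metrics) {
  long perSecond = metrics.getRequestCountPerSecond();  // was getNumberOfRequests()
  long total = metrics.getRequestCount();               // was getTotalNumberOfRequests()
  int usedHeapMB = (int) metrics.getUsedHeapSize().get(Size.Unit.MEGABYTE); // was getUsedHeapMB()
  System.out.println(perSecond + " req/s, " + total + " total, heap " + usedHeapMB + "MB");
}
```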
+ */ + @Deprecated public int getStoreUncompressedSizeMB() { return storeUncompressedSizeMB; } /** * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getStorefileSizeMB()} instead. + * Use {@link #getRegionMetrics} instead. */ @Deprecated public int getStorefileSizeInMB() { return storefileSizeMB; } + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getRegionMetrics} instead. + */ + @Deprecated public int getStorefileSizeMB() { return storefileSizeMB; } /** * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getMemStoreSizeMB()} instead. + * Use {@link #getRegionMetrics} instead. */ @Deprecated public int getMemstoreSizeInMB() { return memstoreSizeMB; } + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getRegionMetrics} instead. + */ + @Deprecated public int getMemStoreSizeMB() { return memstoreSizeMB; } /** * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getStorefileIndexSizeKB()} instead. + * Use {@link #getRegionMetrics} instead. */ @Deprecated public int getStorefileIndexSizeInMB() { @@ -169,71 +259,162 @@ public class ServerLoad { return (int) (getStorefileIndexSizeKB() >> 10); } + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getRegionMetrics} instead. + */ + @Deprecated public long getStorefileIndexSizeKB() { return storefileIndexSizeKB; } + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getRegionMetrics} instead. + */ + @Deprecated public long getReadRequestsCount() { return readRequestsCount; } + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getRegionMetrics} instead. + */ + @Deprecated public long getFilteredReadRequestsCount() { return filteredReadRequestsCount; } + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getRegionMetrics} instead. + */ + @Deprecated public long getWriteRequestsCount() { return writeRequestsCount; } + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getRegionMetrics} instead. + */ + @Deprecated public int getRootIndexSizeKB() { return rootIndexSizeKB; } + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getRegionMetrics} instead. + */ + @Deprecated public int getTotalStaticIndexSizeKB() { return totalStaticIndexSizeKB; } + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getRegionMetrics} instead. + */ + @Deprecated public int getTotalStaticBloomSizeKB() { return totalStaticBloomSizeKB; } + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getRegionMetrics} instead. + */ + @Deprecated public long getTotalCompactingKVs() { return totalCompactingKVs; } + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getRegionMetrics} instead. + */ + @Deprecated public long getCurrentCompactedKVs() { return currentCompactedKVs; } /** - * @return the number of regions + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getRegionMetrics} instead. 
*/ + @Deprecated public int getNumberOfRegions() { - return serverLoad.getRegionLoadsCount(); + return metrics.getRegionMetrics().size(); } + @Override + public ServerName getServerName() { + return metrics.getServerName(); + } + + @Override + public long getRequestCountPerSecond() { + return metrics.getRequestCountPerSecond(); + } + + @Override + public long getRequestCount() { + return metrics.getRequestCount(); + } + + @Override + public Size getUsedHeapSize() { + return metrics.getUsedHeapSize(); + } + + @Override + public Size getMaxHeapSize() { + return metrics.getMaxHeapSize(); + } + + @Override public int getInfoServerPort() { - return serverLoad.getInfoServerPort(); + return metrics.getInfoServerPort(); } /** * Call directly from client such as hbase shell * @return the list of ReplicationLoadSource */ + @Override public List getReplicationLoadSourceList() { - return ProtobufUtil.toReplicationLoadSourceList(serverLoad.getReplLoadSourceList()); + return metrics.getReplicationLoadSourceList(); } /** * Call directly from client such as hbase shell * @return ReplicationLoadSink */ + @Override public ReplicationLoadSink getReplicationLoadSink() { - if (serverLoad.hasReplLoadSink()) { - return ProtobufUtil.toReplicationLoadSink(serverLoad.getReplLoadSink()); - } else { - return null; - } + return metrics.getReplicationLoadSink(); + } + + @Override + public Map getRegionMetrics() { + return metrics.getRegionMetrics(); + } + + @Override + public List getCoprocessorNames() { + return metrics.getCoprocessorNames(); + } + + @Override + public long getReportTimestamp() { + return metrics.getReportTimestamp(); + } + + @Override + public long getLastReportTimestamp() { + return metrics.getLastReportTimestamp(); } /** @@ -243,8 +424,11 @@ public class ServerLoad { * interim, until we can figure out how to make rebalancing use all the info * available, we're just going to make load purely the number of regions. * - * @return load factor for this server + * @return load factor for this server. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getNumberOfRegions} instead. */ + @Deprecated public int getLoad() { // See above comment // int load = numberOfRequests == 0 ? 1 : numberOfRequests; @@ -254,53 +438,43 @@ public class ServerLoad { } /** - * @return region load metrics + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getRegionMetrics} instead. */ + @Deprecated public Map getRegionsLoad() { - Map regionLoads = - new TreeMap<>(Bytes.BYTES_COMPARATOR); - for (ClusterStatusProtos.RegionLoad rl : serverLoad.getRegionLoadsList()) { - RegionLoad regionLoad = new RegionLoad(rl); - regionLoads.put(regionLoad.getName(), regionLoad); - } - return regionLoads; + return getRegionMetrics().entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> new RegionLoad(e.getValue()), + (v1, v2) -> { + throw new RuntimeException("key collisions?"); + }, () -> new TreeMap(Bytes.BYTES_COMPARATOR))); } /** - * Return the RegionServer-level coprocessors - * @return string array of loaded RegionServer-level coprocessors + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getCoprocessorNames} instead. 
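Nearly all of the per-server aggregates deprecated above funnel into getRegionMetrics(); a hedged sketch of recomputing two of them, mirroring the stream style the patch itself uses (imports elided):

```java
// Replaces the deprecated getReadRequestsCount() aggregate.
static long readRequests(ServerMetrics metrics) {
  return metrics.getRegionMetrics().values().stream()
      .mapToLong(RegionMetrics::getReadRequestCount).sum();
}

// Replaces getRegionServerCoprocessors()/getRsCoprocessors().
static String[] coprocessors(ServerMetrics metrics) {
  List<String> names = metrics.getCoprocessorNames();
  return names.toArray(new String[names.size()]);
}
```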
*/ + @Deprecated public String[] getRegionServerCoprocessors() { - List list = obtainServerLoadPB().getCoprocessorsList(); - String [] ret = new String[list.size()]; - int i = 0; - for (Coprocessor elem : list) { - ret[i++] = elem.getName(); - } - - return ret; + return getCoprocessorNames().toArray(new String[getCoprocessorNames().size()]); } /** - * Return the RegionServer-level and Region-level coprocessors - * @return string array of loaded RegionServer-level and - * Region-level coprocessors + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getCoprocessorNames} instead. */ + @Deprecated public String[] getRsCoprocessors() { - // Need a set to remove duplicates, but since generated Coprocessor class - // is not Comparable, make it a Set instead of Set - TreeSet coprocessSet = new TreeSet<>(); - for (Coprocessor coprocessor : obtainServerLoadPB().getCoprocessorsList()) { - coprocessSet.add(coprocessor.getName()); - } - return coprocessSet.toArray(new String[coprocessSet.size()]); + return getRegionServerCoprocessors(); } /** - * @return number of requests per second received since the last report + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getRequestCountPerSecond} instead. */ + @Deprecated public double getRequestsPerSecond() { - return getNumberOfRequests(); + return getRequestCountPerSecond(); } /** @@ -308,70 +482,73 @@ public class ServerLoad { */ @Override public String toString() { - StringBuilder sb = - Strings.appendKeyValue(new StringBuilder(), "requestsPerSecond", - Double.valueOf(getRequestsPerSecond())); + StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "requestsPerSecond", + Double.valueOf(getRequestsPerSecond())); Strings.appendKeyValue(sb, "numberOfOnlineRegions", Integer.valueOf(getNumberOfRegions())); - sb = Strings.appendKeyValue(sb, "usedHeapMB", Integer.valueOf(this.getUsedHeapMB())); - sb = Strings.appendKeyValue(sb, "maxHeapMB", Integer.valueOf(getMaxHeapMB())); - sb = Strings.appendKeyValue(sb, "numberOfStores", Integer.valueOf(this.stores)); - sb = Strings.appendKeyValue(sb, "numberOfStorefiles", Integer.valueOf(this.storefiles)); - sb = - Strings.appendKeyValue(sb, "storefileUncompressedSizeMB", - Integer.valueOf(this.storeUncompressedSizeMB)); - sb = Strings.appendKeyValue(sb, "storefileSizeMB", Integer.valueOf(this.storefileSizeMB)); + Strings.appendKeyValue(sb, "usedHeapMB", Integer.valueOf(this.getUsedHeapMB())); + Strings.appendKeyValue(sb, "maxHeapMB", Integer.valueOf(getMaxHeapMB())); + Strings.appendKeyValue(sb, "numberOfStores", Integer.valueOf(this.stores)); + Strings.appendKeyValue(sb, "numberOfStorefiles", Integer.valueOf(this.storefiles)); + Strings.appendKeyValue(sb, "storefileUncompressedSizeMB", + Integer.valueOf(this.storeUncompressedSizeMB)); + Strings.appendKeyValue(sb, "storefileSizeMB", Integer.valueOf(this.storefileSizeMB)); if (this.storeUncompressedSizeMB != 0) { - sb = - Strings.appendKeyValue( - sb, - "compressionRatio", - String.format("%.4f", (float) this.storefileSizeMB - / (float) this.storeUncompressedSizeMB)); + Strings.appendKeyValue(sb, "compressionRatio", String.format("%.4f", + (float) this.storefileSizeMB / (float) this.storeUncompressedSizeMB)); } - sb = Strings.appendKeyValue(sb, "memstoreSizeMB", Integer.valueOf(this.memstoreSizeMB)); - sb = - Strings.appendKeyValue(sb, "storefileIndexSizeKB", - Long.valueOf(this.storefileIndexSizeKB)); - sb = Strings.appendKeyValue(sb, "readRequestsCount", 
Long.valueOf(this.readRequestsCount)); - sb = Strings.appendKeyValue(sb, "filteredReadRequestsCount", - Long.valueOf(this.filteredReadRequestsCount)); - sb = Strings.appendKeyValue(sb, "writeRequestsCount", Long.valueOf(this.writeRequestsCount)); - sb = Strings.appendKeyValue(sb, "rootIndexSizeKB", Integer.valueOf(this.rootIndexSizeKB)); - sb = - Strings.appendKeyValue(sb, "totalStaticIndexSizeKB", - Integer.valueOf(this.totalStaticIndexSizeKB)); - sb = - Strings.appendKeyValue(sb, "totalStaticBloomSizeKB", - Integer.valueOf(this.totalStaticBloomSizeKB)); - sb = Strings.appendKeyValue(sb, "totalCompactingKVs", Long.valueOf(this.totalCompactingKVs)); - sb = Strings.appendKeyValue(sb, "currentCompactedKVs", Long.valueOf(this.currentCompactedKVs)); + Strings.appendKeyValue(sb, "memstoreSizeMB", Integer.valueOf(this.memstoreSizeMB)); + Strings.appendKeyValue(sb, "storefileIndexSizeKB", + Long.valueOf(this.storefileIndexSizeKB)); + Strings.appendKeyValue(sb, "readRequestsCount", Long.valueOf(this.readRequestsCount)); + Strings.appendKeyValue(sb, "filteredReadRequestsCount", + Long.valueOf(this.filteredReadRequestsCount)); + Strings.appendKeyValue(sb, "writeRequestsCount", Long.valueOf(this.writeRequestsCount)); + Strings.appendKeyValue(sb, "rootIndexSizeKB", Integer.valueOf(this.rootIndexSizeKB)); + Strings.appendKeyValue(sb, "totalStaticIndexSizeKB", + Integer.valueOf(this.totalStaticIndexSizeKB)); + Strings.appendKeyValue(sb, "totalStaticBloomSizeKB", + Integer.valueOf(this.totalStaticBloomSizeKB)); + Strings.appendKeyValue(sb, "totalCompactingKVs", Long.valueOf(this.totalCompactingKVs)); + Strings.appendKeyValue(sb, "currentCompactedKVs", Long.valueOf(this.currentCompactedKVs)); float compactionProgressPct = Float.NaN; if (this.totalCompactingKVs > 0) { compactionProgressPct = Float.valueOf((float) this.currentCompactedKVs / this.totalCompactingKVs); } - sb = Strings.appendKeyValue(sb, "compactionProgressPct", compactionProgressPct); + Strings.appendKeyValue(sb, "compactionProgressPct", compactionProgressPct); String[] coprocessorStrings = getRsCoprocessors(); if (coprocessorStrings != null) { - sb = Strings.appendKeyValue(sb, "coprocessors", Arrays.toString(coprocessorStrings)); + Strings.appendKeyValue(sb, "coprocessors", Arrays.toString(coprocessorStrings)); } return sb.toString(); } + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link ServerMetricsBuilder#of(ServerName)} instead. + */ + @Deprecated public static final ServerLoad EMPTY_SERVERLOAD = - new ServerLoad(ClusterStatusProtos.ServerLoad.newBuilder().build()); + new ServerLoad(ServerName.valueOf("localhost,1,1"), + ClusterStatusProtos.ServerLoad.newBuilder().build()); + /** + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 + * Use {@link #getReportTimestamp} instead. 
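As the deprecation note above indicates, the empty-load sentinel moves from a shared constant to the builder, which also carries a real server name instead of the fake one. A sketch (hostname, port, and start code are placeholders):

```java
// Before: ServerLoad.EMPTY_SERVERLOAD (built on the fake "localhost,1,1" name).
// After:  an empty ServerMetrics bound to the actual server name.
ServerMetrics empty = ServerMetricsBuilder.of(ServerName.valueOf("host1,16020,1500000000000"));
```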
+ */ + @Deprecated public long getReportTime() { - return reportTime; + return getReportTimestamp(); } @Override public int hashCode() { - return Objects.hashCode(stores, storefiles, storeUncompressedSizeMB, - storefileSizeMB, memstoreSizeMB, storefileIndexSizeKB, readRequestsCount, - filteredReadRequestsCount, writeRequestsCount, rootIndexSizeKB, totalStaticIndexSizeKB, - totalStaticBloomSizeKB, totalCompactingKVs, currentCompactedKVs); + return Objects + .hashCode(stores, storefiles, storeUncompressedSizeMB, storefileSizeMB, memstoreSizeMB, + storefileIndexSizeKB, readRequestsCount, filteredReadRequestsCount, writeRequestsCount, + rootIndexSizeKB, totalStaticIndexSizeKB, totalStaticBloomSizeKB, totalCompactingKVs, + currentCompactedKVs); } @Override @@ -379,16 +556,17 @@ public class ServerLoad { if (other == this) return true; if (other instanceof ServerLoad) { ServerLoad sl = ((ServerLoad) other); - return stores == sl.stores && storefiles == sl.storefiles && - storeUncompressedSizeMB == sl.storeUncompressedSizeMB && - storefileSizeMB == sl.storefileSizeMB && memstoreSizeMB == sl.memstoreSizeMB && - storefileIndexSizeKB == sl.storefileIndexSizeKB && readRequestsCount == sl.readRequestsCount && - filteredReadRequestsCount == sl.filteredReadRequestsCount && - writeRequestsCount == sl.writeRequestsCount && rootIndexSizeKB == sl.rootIndexSizeKB && - totalStaticIndexSizeKB == sl.totalStaticIndexSizeKB && - totalStaticBloomSizeKB == sl.totalStaticBloomSizeKB && - totalCompactingKVs == sl.totalCompactingKVs && - currentCompactedKVs == sl.currentCompactedKVs; + return stores == sl.stores && storefiles == sl.storefiles + && storeUncompressedSizeMB == sl.storeUncompressedSizeMB + && storefileSizeMB == sl.storefileSizeMB && memstoreSizeMB == sl.memstoreSizeMB + && storefileIndexSizeKB == sl.storefileIndexSizeKB + && readRequestsCount == sl.readRequestsCount + && filteredReadRequestsCount == sl.filteredReadRequestsCount + && writeRequestsCount == sl.writeRequestsCount && rootIndexSizeKB == sl.rootIndexSizeKB + && totalStaticIndexSizeKB == sl.totalStaticIndexSizeKB + && totalStaticBloomSizeKB == sl.totalStaticBloomSizeKB + && totalCompactingKVs == sl.totalCompactingKVs + && currentCompactedKVs == sl.currentCompactedKVs; } return false; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java new file mode 100644 index 00000000000..1ef3126de3c --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java @@ -0,0 +1,90 @@ +/** + * Copyright The Apache Software Foundation + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase; + +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.List; +import java.util.Map; +import org.apache.hadoop.hbase.replication.ReplicationLoadSink; +import org.apache.hadoop.hbase.replication.ReplicationLoadSource; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * This interface is used for exporting the current state of load on a RegionServer. + */ +@InterfaceAudience.Public +public interface ServerMetrics { + + ServerName getServerName(); + /** + * @return the number of requests per second. + */ + long getRequestCountPerSecond(); + + /** + * @return total number of requests from the start of the region server. + */ + long getRequestCount(); + + /** + * @return the amount of used heap + */ + Size getUsedHeapSize(); + + /** + * @return the maximum allowable size of the heap + */ + Size getMaxHeapSize(); + + int getInfoServerPort(); + + /** + * Called directly from clients such as the hbase shell. + * @return the list of ReplicationLoadSource + */ + List<ReplicationLoadSource> getReplicationLoadSourceList(); + + /** + * Called directly from clients such as the hbase shell. + * @return ReplicationLoadSink + */ + @Nullable + ReplicationLoadSink getReplicationLoadSink(); + + /** + * @return region load metrics + */ + Map<byte[], RegionMetrics> getRegionMetrics(); + + /** + * Return the RegionServer-level and Region-level coprocessors + * @return string list of loaded RegionServer-level and Region-level coprocessors + */ + List<String> getCoprocessorNames(); + + /** + * @return the timestamp (server side) at which these metrics were generated + */ + long getReportTimestamp(); + + /** + * @return the timestamp (server side) of the previous metrics report + */ + long getLastReportTimestamp(); + +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java new file mode 100644 index 00000000000..e501c439806 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java @@ -0,0 +1,352 @@ +/** + * Copyright The Apache Software Foundation + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
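A consumption sketch for the interface just defined, iterating the live-server map that ClusterMetrics exposes; the dumpServers method name is invented for illustration:

```java
static void dumpServers(ClusterMetrics cluster) {
  for (ServerMetrics sm : cluster.getLiveServerMetrics().values()) {
    System.out.println(sm.getServerName() + ": " + sm.getRequestCountPerSecond() + " req/s, heap "
        + sm.getUsedHeapSize() + "/" + sm.getMaxHeapSize() + ", "
        + sm.getRegionMetrics().size() + " regions, reported at " + sm.getReportTimestamp());
  }
}
```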
+ */ + +package org.apache.hadoop.hbase; + +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.stream.Collectors; +import org.apache.hadoop.hbase.replication.ReplicationLoadSink; +import org.apache.hadoop.hbase.replication.ReplicationLoadSource; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Strings; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; + +@InterfaceAudience.Private +public final class ServerMetricsBuilder { + + /** + * @param sn the server name + * @return an empty metrics instance + */ + public static ServerMetrics of(ServerName sn) { + return newBuilder(sn).build(); + } + + public static ServerMetrics toServerMetrics(ClusterStatusProtos.LiveServerInfo serverInfo) { + return toServerMetrics(ProtobufUtil.toServerName(serverInfo.getServer()), + serverInfo.getServerLoad()); + } + + public static ServerMetrics toServerMetrics(ServerName serverName, + ClusterStatusProtos.ServerLoad serverLoadPB) { + return ServerMetricsBuilder.newBuilder(serverName) + .setRequestCountPerSecond(serverLoadPB.getNumberOfRequests()) + .setRequestCount(serverLoadPB.getTotalNumberOfRequests()) + .setInfoServerPort(serverLoadPB.getInfoServerPort()) + .setMaxHeapSize(new Size(serverLoadPB.getMaxHeapMB(), Size.Unit.MEGABYTE)) + .setUsedHeapSize(new Size(serverLoadPB.getUsedHeapMB(), Size.Unit.MEGABYTE)) + .setCoprocessorNames(serverLoadPB.getCoprocessorsList().stream() + .map(HBaseProtos.Coprocessor::getName).collect(Collectors.toList())) + .setRegionMetrics(serverLoadPB.getRegionLoadsList().stream() + .map(RegionMetricsBuilder::toRegionMetrics) + .collect(Collectors.toList())) + .setReplicationLoadSources(serverLoadPB.getReplLoadSourceList().stream() + .map(ProtobufUtil::toReplicationLoadSource) + .collect(Collectors.toList())) + .setReplicationLoadSink(serverLoadPB.hasReplLoadSink() ?
+ ProtobufUtil.toReplicationLoadSink(serverLoadPB.getReplLoadSink()) : null) + .setReportTimestamp(serverLoadPB.getReportEndTime()) + .setLastReportTimestamp(serverLoadPB.getReportStartTime()) + .build(); + } + + public static List toCoprocessor(List names) { + return names.stream() + .map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build()) + .collect(Collectors.toList()); + } + + public static ClusterStatusProtos.ServerLoad toServerLoad(ServerMetrics metrics) { + ClusterStatusProtos.ServerLoad.Builder builder = ClusterStatusProtos.ServerLoad.newBuilder() + .setNumberOfRequests(metrics.getRequestCountPerSecond()) + .setTotalNumberOfRequests(metrics.getRequestCount()) + .setInfoServerPort(metrics.getInfoServerPort()) + .setMaxHeapMB((int) metrics.getMaxHeapSize().get(Size.Unit.MEGABYTE)) + .setUsedHeapMB((int) metrics.getUsedHeapSize().get(Size.Unit.MEGABYTE)) + .addAllCoprocessors(toCoprocessor(metrics.getCoprocessorNames())) + .addAllRegionLoads(metrics.getRegionMetrics().values().stream() + .map(RegionMetricsBuilder::toRegionLoad) + .collect(Collectors.toList())) + .addAllReplLoadSource(metrics.getReplicationLoadSourceList().stream() + .map(ProtobufUtil::toReplicationLoadSource) + .collect(Collectors.toList())) + .setReportStartTime(metrics.getLastReportTimestamp()) + .setReportEndTime(metrics.getReportTimestamp()); + if (metrics.getReplicationLoadSink() != null) { + builder.setReplLoadSink(ProtobufUtil.toReplicationLoadSink( + metrics.getReplicationLoadSink())); + } + return builder.build(); + } + + public static ServerMetricsBuilder newBuilder(ServerName sn) { + return new ServerMetricsBuilder(sn); + } + + private final ServerName serverName; + private long requestCountPerSecond; + private long requestCount; + private Size usedHeapSize = Size.ZERO; + private Size maxHeapSize = Size.ZERO; + private int infoServerPort; + private List sources = Collections.emptyList(); + @Nullable + private ReplicationLoadSink sink = null; + private final Map regionStatus = new TreeMap<>(Bytes.BYTES_COMPARATOR); + private List coprocessorNames = Collections.emptyList(); + private long reportTimestamp = System.currentTimeMillis(); + private long lastReportTimestamp = 0; + private ServerMetricsBuilder(ServerName serverName) { + this.serverName = serverName; + } + + public ServerMetricsBuilder setRequestCountPerSecond(long value) { + this.requestCountPerSecond = value; + return this; + } + + public ServerMetricsBuilder setRequestCount(long value) { + this.requestCount = value; + return this; + } + + public ServerMetricsBuilder setUsedHeapSize(Size value) { + this.usedHeapSize = value; + return this; + } + + public ServerMetricsBuilder setMaxHeapSize(Size value) { + this.maxHeapSize = value; + return this; + } + + public ServerMetricsBuilder setInfoServerPort(int value) { + this.infoServerPort = value; + return this; + } + + public ServerMetricsBuilder setReplicationLoadSources(List value) { + this.sources = value; + return this; + } + + public ServerMetricsBuilder setReplicationLoadSink(ReplicationLoadSink value) { + this.sink = value; + return this; + } + + public ServerMetricsBuilder setRegionMetrics(List value) { + value.forEach(v -> this.regionStatus.put(v.getRegionName(), v)); + return this; + } + + public ServerMetricsBuilder setCoprocessorNames(List value) { + this.coprocessorNames = value; + return this; + } + + public ServerMetricsBuilder setReportTimestamp(long value) { + this.reportTimestamp = value; + return this; + } + + public ServerMetricsBuilder setLastReportTimestamp(long value) { 
this.lastReportTimestamp = value; + return this; + } + + public ServerMetrics build() { + return new ServerMetricsImpl( + serverName, + requestCountPerSecond, + requestCount, + usedHeapSize, + maxHeapSize, + infoServerPort, + sources, + sink, + regionStatus, + coprocessorNames, + reportTimestamp, + lastReportTimestamp); + } + + private static class ServerMetricsImpl implements ServerMetrics { + private final ServerName serverName; + private final long requestCountPerSecond; + private final long requestCount; + private final Size usedHeapSize; + private final Size maxHeapSize; + private final int infoServerPort; + private final List<ReplicationLoadSource> sources; + @Nullable + private final ReplicationLoadSink sink; + private final Map<byte[], RegionMetrics> regionStatus; + private final List<String> coprocessorNames; + private final long reportTimestamp; + private final long lastReportTimestamp; + + ServerMetricsImpl(ServerName serverName, long requestCountPerSecond, long requestCount, + Size usedHeapSize, Size maxHeapSize, int infoServerPort, List<ReplicationLoadSource> sources, + ReplicationLoadSink sink, Map<byte[], RegionMetrics> regionStatus, + List<String> coprocessorNames, long reportTimestamp, long lastReportTimestamp) { + this.serverName = Preconditions.checkNotNull(serverName); + this.requestCountPerSecond = requestCountPerSecond; + this.requestCount = requestCount; + this.usedHeapSize = Preconditions.checkNotNull(usedHeapSize); + this.maxHeapSize = Preconditions.checkNotNull(maxHeapSize); + this.infoServerPort = infoServerPort; + this.sources = Preconditions.checkNotNull(sources); + this.sink = sink; + this.regionStatus = Preconditions.checkNotNull(regionStatus); + this.coprocessorNames = Preconditions.checkNotNull(coprocessorNames); + this.reportTimestamp = reportTimestamp; + this.lastReportTimestamp = lastReportTimestamp; + } + + @Override + public ServerName getServerName() { + return serverName; + } + @Override + public long getRequestCountPerSecond() { + return requestCountPerSecond; + } + + @Override + public long getRequestCount() { + return requestCount; + } + + @Override + public Size getUsedHeapSize() { + return usedHeapSize; + } + + @Override + public Size getMaxHeapSize() { + return maxHeapSize; + } + + @Override + public int getInfoServerPort() { + return infoServerPort; + } + + @Override + public List<ReplicationLoadSource> getReplicationLoadSourceList() { + return Collections.unmodifiableList(sources); + } + + @Override + public ReplicationLoadSink getReplicationLoadSink() { + return sink; + } + + @Override + public Map<byte[], RegionMetrics> getRegionMetrics() { + return Collections.unmodifiableMap(regionStatus); + } + + @Override + public List<String> getCoprocessorNames() { + return Collections.unmodifiableList(coprocessorNames); + } + + @Override + public long getReportTimestamp() { + return reportTimestamp; + } + + @Override + public long getLastReportTimestamp() { + return lastReportTimestamp; + } + + @Override + public String toString() { + int storeCount = 0; + int storeFileCount = 0; + long uncompressedStoreFileSizeMB = 0; + long storeFileSizeMB = 0; + long memStoreSizeMB = 0; + long totalStaticIndexSizeKB = 0; + long rootLevelIndexSizeKB = 0; + long readRequestsCount = 0; + long writeRequestsCount = 0; + long filteredReadRequestsCount = 0; + long totalStaticBloomSizeKB = 0; + long compactingCellCount = 0; + long compactedCellCount = 0; + for (RegionMetrics r : getRegionMetrics().values()) { + storeCount += r.getStoreCount(); + storeFileCount += r.getStoreFileCount(); + uncompressedStoreFileSizeMB += r.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE); + storeFileSizeMB += r.getStoreFileSize().get(Size.Unit.MEGABYTE); + memStoreSizeMB += r.getMemStoreSize().get(Size.Unit.MEGABYTE); + totalStaticIndexSizeKB += r.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE); + readRequestsCount += r.getReadRequestCount(); + writeRequestsCount += r.getWriteRequestCount(); + filteredReadRequestsCount += r.getFilteredReadRequestCount(); + rootLevelIndexSizeKB += r.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE); + // accumulate in KB to match the "totalStaticBloomSizeKB" key printed below + totalStaticBloomSizeKB += r.getBloomFilterSize().get(Size.Unit.KILOBYTE); + compactedCellCount += r.getCompactedCellCount(); + compactingCellCount += r.getCompactingCellCount(); + } + StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "requestsPerSecond", + Double.valueOf(getRequestCountPerSecond())); + Strings.appendKeyValue(sb, "numberOfOnlineRegions", + Integer.valueOf(getRegionMetrics().size())); + Strings.appendKeyValue(sb, "usedHeapMB", getUsedHeapSize()); + Strings.appendKeyValue(sb, "maxHeapMB", getMaxHeapSize()); + Strings.appendKeyValue(sb, "numberOfStores", storeCount); + Strings.appendKeyValue(sb, "numberOfStorefiles", storeFileCount); + Strings.appendKeyValue(sb, "storefileUncompressedSizeMB", uncompressedStoreFileSizeMB); + Strings.appendKeyValue(sb, "storefileSizeMB", storeFileSizeMB); + if (uncompressedStoreFileSizeMB != 0) { + Strings.appendKeyValue(sb, "compressionRatio", String.format("%.4f", + (float) storeFileSizeMB / (float) uncompressedStoreFileSizeMB)); + } + Strings.appendKeyValue(sb, "memstoreSizeMB", memStoreSizeMB); + Strings.appendKeyValue(sb, "readRequestsCount", readRequestsCount); + Strings.appendKeyValue(sb, "filteredReadRequestsCount", filteredReadRequestsCount); + Strings.appendKeyValue(sb, "writeRequestsCount", writeRequestsCount); + Strings.appendKeyValue(sb, "rootIndexSizeKB", rootLevelIndexSizeKB); + Strings.appendKeyValue(sb, "totalStaticIndexSizeKB", totalStaticIndexSizeKB); + Strings.appendKeyValue(sb, "totalStaticBloomSizeKB", totalStaticBloomSizeKB); + Strings.appendKeyValue(sb, "totalCompactingKVs", compactingCellCount); + Strings.appendKeyValue(sb, "currentCompactedKVs", compactedCellCount); + float compactionProgressPct = Float.NaN; + if (compactingCellCount > 0) { + compactionProgressPct = + Float.valueOf((float) compactedCellCount / compactingCellCount); + } + Strings.appendKeyValue(sb, "compactionProgressPct", compactionProgressPct); + Strings.appendKeyValue(sb, "coprocessors", getCoprocessorNames()); + return sb.toString(); + } + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java new file mode 100644 index 00000000000..87d75542111 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java @@ -0,0 +1,158 @@ +/** + * Copyright The Apache Software Foundation + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
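Before the Size class body that this hunk introduces, a quick sketch of its conversion and comparison semantics under the 1024-based scaling it defines (the values are arbitrary):

```java
Size twoGB = new Size(2, Size.Unit.GIGABYTE);
double asMB = twoGB.get(Size.Unit.MEGABYTE);                     // 2048.0
boolean same = twoGB.equals(new Size(2048, Size.Unit.MEGABYTE)); // true: equality is compareTo()-based
```

One caveat worth flagging in review: hashCode() hashes the raw value and unit, so two Size instances that compare equal across units (as above) hash differently; mixed-unit Size keys are best kept out of hash-based collections.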
* See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import java.math.BigDecimal; +import java.util.Objects; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions; + +/** + * Represents a size with a unit. + * This class is not intended for precise computation. + */ +@InterfaceAudience.Public +public final class Size implements Comparable<Size> { + public static final Size ZERO = new Size(0, Unit.KILOBYTE); + private static final BigDecimal SCALE_BASE = BigDecimal.valueOf(1024D); + + public enum Unit { + // keep room to add more units for HBase 10.x + PETABYTE(100, "PB"), + TERABYTE(99, "TB"), + GIGABYTE(98, "GB"), + MEGABYTE(97, "MB"), + KILOBYTE(96, "KB"), + BYTE(95, "B"); + private final int orderOfSize; + private final String simpleName; + + Unit(int orderOfSize, String simpleName) { + this.orderOfSize = orderOfSize; + this.simpleName = simpleName; + } + + public int getOrderOfSize() { + return orderOfSize; + } + + public String getSimpleName() { + return simpleName; + } + } + + private final double value; + private final Unit unit; + + public Size(double value, Unit unit) { + if (value < 0) { + throw new IllegalArgumentException("The value: " + value + " can't be negative"); + } + this.value = value; + this.unit = Preconditions.checkNotNull(unit); + } + + /** + * @return size unit + */ + public Unit getUnit() { + return unit; + } + + /** + * @return the value, truncated to a long + */ + public long getLongValue() { + return (long) value; + } + + /** + * @return the raw value + */ + public double get() { + return value; + } + + /** + * Get the value converted to the specified unit. + * + * @param unit size unit + * @return the converted value + */ + public double get(Unit unit) { + if (value == 0) { + return value; + } + int diff = this.unit.getOrderOfSize() - unit.getOrderOfSize(); + if (diff == 0) { + return value; + } + + BigDecimal rval = BigDecimal.valueOf(value); + for (int i = 0; i != Math.abs(diff); ++i) { + rval = diff > 0 ?
rval.multiply(SCALE_BASE) : rval.divide(SCALE_BASE); + } + return rval.doubleValue(); + } + + @Override + public int compareTo(Size other) { + int diff = unit.getOrderOfSize() - other.unit.getOrderOfSize(); + if (diff == 0) { + return Double.compare(value, other.value); + } + + BigDecimal thisValue = BigDecimal.valueOf(value); + BigDecimal otherValue = BigDecimal.valueOf(other.value); + if (diff > 0) { + for (int i = 0; i != Math.abs(diff); ++i) { + thisValue = thisValue.multiply(SCALE_BASE); + } + } else { + for (int i = 0; i != Math.abs(diff); ++i) { + otherValue = otherValue.multiply(SCALE_BASE); + } + } + return thisValue.compareTo(otherValue); + } + + @Override + public String toString() { + return value + unit.getSimpleName(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj == this) { + return true; + } + if (obj instanceof Size) { + return compareTo((Size)obj) == 0; + } + return false; + } + + @Override + public int hashCode() { + return Objects.hash(value, unit); + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 0567e8e9428..de022d35a3c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -28,12 +28,11 @@ import java.util.Set; import java.util.TreeMap; import java.util.concurrent.Future; import java.util.regex.Pattern; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.CacheEvictionStats; +import org.apache.hadoop.hbase.ClusterMetrics.Option; import org.apache.hadoop.hbase.ClusterStatus; -import org.apache.hadoop.hbase.ClusterStatus.Option; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.NamespaceDescriptor; @@ -43,7 +42,6 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.replication.TableCFs; import org.apache.hadoop.hbase.client.security.SecurityCapability; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; @@ -60,6 +58,7 @@ import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; +import org.apache.yetus.audience.InterfaceAudience; /** * The administrative API for HBase. 
Obtain an instance from {@link Connection#getAdmin()} and diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index 2ae51aca8cb..c01d5fa1415 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.client; import com.google.protobuf.RpcChannel; - import java.util.Arrays; import java.util.Collection; import java.util.EnumSet; @@ -29,9 +28,8 @@ import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.function.Function; import java.util.regex.Pattern; - +import org.apache.hadoop.hbase.ClusterMetrics.Option; import org.apache.hadoop.hbase.ClusterStatus; -import org.apache.hadoop.hbase.ClusterStatus.Option; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.RegionLoad; import org.apache.hadoop.hbase.ServerName; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index 0f0679d8ce7..09fdeffca23 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.client; import com.google.protobuf.RpcChannel; - import java.util.Collection; import java.util.EnumSet; import java.util.List; @@ -29,9 +28,8 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; import java.util.function.Function; import java.util.regex.Pattern; - +import org.apache.hadoop.hbase.ClusterMetrics.Option; import org.apache.hadoop.hbase.ClusterStatus; -import org.apache.hadoop.hbase.ClusterStatus.Option; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.RegionLoad; import org.apache.hadoop.hbase.ServerName; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java index 5ff1b67c611..0c53ee4719d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java @@ -264,7 +264,7 @@ class ClusterStatusListener implements Closeable { ByteBufInputStream bis = new ByteBufInputStream(dp.content()); try { ClusterStatusProtos.ClusterStatus csp = ClusterStatusProtos.ClusterStatus.parseFrom(bis); - ClusterStatus ncs = ProtobufUtil.convert(csp); + ClusterStatus ncs = ProtobufUtil.toClusterStatus(csp); receive(ncs); } finally { bis.close(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 600ee696145..bbcc825a07e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.client; import com.google.protobuf.Descriptors; import com.google.protobuf.Message; import com.google.protobuf.RpcController; - import java.io.Closeable; import java.io.IOException; import java.io.InterruptedIOException; @@ -45,13 +44,12 @@ import 
java.util.concurrent.atomic.AtomicReference; import java.util.regex.Pattern; import java.util.stream.Collectors; import java.util.stream.Stream; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.CacheEvictionStats; import org.apache.hadoop.hbase.CacheEvictionStatsBuilder; +import org.apache.hadoop.hbase.ClusterMetrics.Option; import org.apache.hadoop.hbase.ClusterStatus; -import org.apache.hadoop.hbase.ClusterStatus.Option; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; @@ -103,6 +101,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ -2086,7 +2085,7 @@ public class HBaseAdmin implements Admin { @Override protected ClusterStatus rpcCall() throws Exception { GetClusterStatusRequest req = RequestConverter.buildGetClusterStatusRequest(options); - return ProtobufUtil.convert( + return ProtobufUtil.toClusterStatus( master.getClusterStatus(getRpcController(), req).getClusterStatus()); } }); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index bb427b1b494..5e1c65445cc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -23,7 +23,6 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.protobuf.Message; import com.google.protobuf.RpcChannel; - import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -43,12 +42,11 @@ import java.util.function.Function; import java.util.regex.Pattern; import java.util.stream.Collectors; import java.util.stream.Stream; - import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.AsyncMetaTableAccessor; +import org.apache.hadoop.hbase.ClusterMetrics.Option; import org.apache.hadoop.hbase.ClusterStatus; -import org.apache.hadoop.hbase.ClusterStatus.Option; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MetaTableAccessor; @@ -87,6 +85,7 @@ import org.apache.hadoop.hbase.util.ForeignExceptionUtil; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback; import org.apache.hadoop.hbase.shaded.io.netty.util.HashedWheelTimer; import org.apache.hadoop.hbase.shaded.io.netty.util.Timeout; @@ -2617,7 +2616,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { . 
call(controller, stub, RequestConverter.buildGetClusterStatusRequest(options), (s, c, req, done) -> s.getClusterStatus(c, req, done), - resp -> ProtobufUtil.convert(resp.getClusterStatus()))).call(); + resp -> ProtobufUtil.toClusterStatus(resp.getClusterStatus()))).call(); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSink.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSink.java index 7ae0e0f9cfc..de4d4cac53b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSink.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSink.java @@ -15,11 +15,13 @@ import org.apache.yetus.audience.InterfaceAudience; /** * An HBase ReplicationLoad to present MetricsSink information */ -@InterfaceAudience.Private +@InterfaceAudience.Public public class ReplicationLoadSink { - private long ageOfLastAppliedOp; - private long timeStampsOfLastAppliedOp; + private final long ageOfLastAppliedOp; + private final long timeStampsOfLastAppliedOp; + // TODO: add the builder for this class + @InterfaceAudience.Private public ReplicationLoadSink(long age, long timeStamp) { this.ageOfLastAppliedOp = age; this.timeStampsOfLastAppliedOp = timeStamp; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSource.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSource.java index 0b028058bc1..845320cc91f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSource.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSource.java @@ -15,14 +15,16 @@ import org.apache.yetus.audience.InterfaceAudience; /** * An HBase ReplicationLoad to present MetricsSource information */ -@InterfaceAudience.Private +@InterfaceAudience.Public public class ReplicationLoadSource { - private String peerID; - private long ageOfLastShippedOp; - private int sizeOfLogQueue; - private long timeStampOfLastShippedOp; - private long replicationLag; + private final String peerID; + private final long ageOfLastShippedOp; + private final int sizeOfLogQueue; + private final long timeStampOfLastShippedOp; + private final long replicationLag; + // TODO: add the builder for this class + @InterfaceAudience.Private public ReplicationLoadSource(String id, long age, int size, long timeStamp, long lag) { this.peerID = id; this.ageOfLastShippedOp = age; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index c9ea5a5b085..17b114108c0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -25,7 +25,6 @@ import java.lang.reflect.Method; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; -import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Locale; @@ -49,9 +48,8 @@ import org.apache.hadoop.hbase.Cell.DataType; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.ClusterId; +import org.apache.hadoop.hbase.ClusterMetricsBuilder; import org.apache.hadoop.hbase.ClusterStatus; -import
org.apache.hadoop.hbase.ClusterStatus.Option; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; @@ -60,7 +58,6 @@ import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.NamespaceDescriptor; -import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Append; @@ -92,7 +89,6 @@ import org.apache.hadoop.hbase.filter.ByteArrayComparable; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.LimitInputStream; import org.apache.hadoop.hbase.io.TimeRange; -import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.protobuf.ProtobufMagic; import org.apache.hadoop.hbase.protobuf.ProtobufMessageConverter; import org.apache.hadoop.hbase.quotas.QuotaScope; @@ -150,11 +146,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationPr import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.LiveServerInfo; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad; import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.FSProtos.HBaseVersionFileContent; import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair; @@ -2731,23 +2724,14 @@ public final class ProtobufUtil { } public static ReplicationLoadSink toReplicationLoadSink( - ClusterStatusProtos.ReplicationLoadSink cls) { - return new ReplicationLoadSink(cls.getAgeOfLastAppliedOp(), cls.getTimeStampsOfLastAppliedOp()); + ClusterStatusProtos.ReplicationLoadSink rls) { + return new ReplicationLoadSink(rls.getAgeOfLastAppliedOp(), rls.getTimeStampsOfLastAppliedOp()); } public static ReplicationLoadSource toReplicationLoadSource( - ClusterStatusProtos.ReplicationLoadSource cls) { - return new ReplicationLoadSource(cls.getPeerID(), cls.getAgeOfLastShippedOp(), - cls.getSizeOfLogQueue(), cls.getTimeStampOfLastShippedOp(), cls.getReplicationLag()); - } - - public static List toReplicationLoadSourceList( - List clsList) { - ArrayList rlsList = new ArrayList<>(clsList.size()); - for (ClusterStatusProtos.ReplicationLoadSource cls : clsList) { - rlsList.add(toReplicationLoadSource(cls)); - } - return rlsList; + ClusterStatusProtos.ReplicationLoadSource rls) { + return new ReplicationLoadSource(rls.getPeerID(), rls.getAgeOfLastShippedOp(), + rls.getSizeOfLogQueue(), rls.getTimeStampOfLastShippedOp(), rls.getReplicationLag()); } /** @@ -2991,213 +2975,8 @@ public final class ProtobufUtil { * @param proto the protobuf ClusterStatus * @return the converted ClusterStatus */ - public static ClusterStatus convert(ClusterStatusProtos.ClusterStatus proto) { - ClusterStatus.Builder builder = ClusterStatus.newBuilder(); - - Map servers = 
null; - servers = new HashMap<>(proto.getLiveServersList().size()); - for (LiveServerInfo lsi : proto.getLiveServersList()) { - servers.put(ProtobufUtil.toServerName( - lsi.getServer()), new ServerLoad(lsi.getServerLoad())); - } - - List deadServers = new ArrayList<>(proto.getDeadServersList().size()); - for (HBaseProtos.ServerName sn : proto.getDeadServersList()) { - deadServers.add(ProtobufUtil.toServerName(sn)); - } - - List backupMasters = new ArrayList<>(proto.getBackupMastersList().size()); - for (HBaseProtos.ServerName sn : proto.getBackupMastersList()) { - backupMasters.add(ProtobufUtil.toServerName(sn)); - } - - List rit = - new ArrayList<>(proto.getRegionsInTransitionList().size()); - for (RegionInTransition region : proto.getRegionsInTransitionList()) { - RegionState value = RegionState.convert(region.getRegionState()); - rit.add(value); - } - - String[] masterCoprocessors = null; - final int numMasterCoprocessors = proto.getMasterCoprocessorsCount(); - masterCoprocessors = new String[numMasterCoprocessors]; - for (int i = 0; i < numMasterCoprocessors; i++) { - masterCoprocessors[i] = proto.getMasterCoprocessors(i).getName(); - } - - String clusterId = null; - if (proto.hasClusterId()) { - clusterId = ClusterId.convert(proto.getClusterId()).toString(); - } - - String hbaseVersion = null; - if (proto.hasHbaseVersion()) { - hbaseVersion = proto.getHbaseVersion().getVersion(); - } - - ServerName master = null; - if (proto.hasMaster()) { - master = ProtobufUtil.toServerName(proto.getMaster()); - } - - Boolean balancerOn = null; - if (proto.hasBalancerOn()) { - balancerOn = proto.getBalancerOn(); - } - - builder.setHBaseVersion(hbaseVersion) - .setClusterId(clusterId) - .setLiveServers(servers) - .setDeadServers(deadServers) - .setMaster(master) - .setBackupMasters(backupMasters) - .setRegionState(rit) - .setMasterCoprocessors(masterCoprocessors) - .setBalancerOn(balancerOn); - if (proto.hasMasterInfoPort()) { - builder.setMasterInfoPort(proto.getMasterInfoPort()); - } - return builder.build(); - } - - /** - * Convert ClusterStatusProtos.Option to ClusterStatus.Option - * @param option a ClusterStatusProtos.Option - * @return converted ClusterStatus.Option - */ - public static ClusterStatus.Option toOption(ClusterStatusProtos.Option option) { - switch (option) { - case HBASE_VERSION: return ClusterStatus.Option.HBASE_VERSION; - case LIVE_SERVERS: return ClusterStatus.Option.LIVE_SERVERS; - case DEAD_SERVERS: return ClusterStatus.Option.DEAD_SERVERS; - case REGIONS_IN_TRANSITION: return ClusterStatus.Option.REGIONS_IN_TRANSITION; - case CLUSTER_ID: return ClusterStatus.Option.CLUSTER_ID; - case MASTER_COPROCESSORS: return ClusterStatus.Option.MASTER_COPROCESSORS; - case MASTER: return ClusterStatus.Option.MASTER; - case BACKUP_MASTERS: return ClusterStatus.Option.BACKUP_MASTERS; - case BALANCER_ON: return ClusterStatus.Option.BALANCER_ON; - case MASTER_INFO_PORT: return ClusterStatus.Option.MASTER_INFO_PORT; - // should not reach here - default: throw new IllegalArgumentException("Invalid option: " + option); - } - } - - /** - * Convert ClusterStatus.Option to ClusterStatusProtos.Option - * @param option a ClusterStatus.Option - * @return converted ClusterStatusProtos.Option - */ - public static ClusterStatusProtos.Option toOption(ClusterStatus.Option option) { - switch (option) { - case HBASE_VERSION: return ClusterStatusProtos.Option.HBASE_VERSION; - case LIVE_SERVERS: return ClusterStatusProtos.Option.LIVE_SERVERS; - case DEAD_SERVERS: return 
ClusterStatusProtos.Option.DEAD_SERVERS; - case REGIONS_IN_TRANSITION: return ClusterStatusProtos.Option.REGIONS_IN_TRANSITION; - case CLUSTER_ID: return ClusterStatusProtos.Option.CLUSTER_ID; - case MASTER_COPROCESSORS: return ClusterStatusProtos.Option.MASTER_COPROCESSORS; - case MASTER: return ClusterStatusProtos.Option.MASTER; - case BACKUP_MASTERS: return ClusterStatusProtos.Option.BACKUP_MASTERS; - case BALANCER_ON: return ClusterStatusProtos.Option.BALANCER_ON; - case MASTER_INFO_PORT: return ClusterStatusProtos.Option.MASTER_INFO_PORT; - // should not reach here - default: throw new IllegalArgumentException("Invalid option: " + option); - } - } - - /** - * Convert a list of ClusterStatusProtos.Option to an enum set of ClusterStatus.Option - * @param options - * @return an enum set of ClusterStatus.Option - */ - public static EnumSet
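
A note for reviewers on the new Size class earlier in this patch: compareTo() rescales the operand with the larger unit into the smaller unit (multiplying by SCALE_BASE once per order-of-size step) before comparing, and equals() is defined in terms of compareTo(), so two Size instances expressing the same quantity in different units are equal. The following is a minimal usage sketch of those semantics; it assumes only the Size(double, Unit) constructor and the Unit constants introduced by this patch, and the example class name is hypothetical.
<pre>{@code
import org.apache.hadoop.hbase.Size;

public class SizeSemanticsExample {
  public static void main(String[] args) {
    // 1 MB and 1024 KB describe the same quantity in different units.
    Size oneMegabyte = new Size(1.0, Size.Unit.MEGABYTE);
    Size kilobytes = new Size(1024.0, Size.Unit.KILOBYTE);

    // compareTo() scales the larger-unit operand into the smaller unit
    // before comparing, so the two values are reported as equal.
    System.out.println(oneMegabyte.compareTo(kilobytes)); // 0
    System.out.println(oneMegabyte.equals(kilobytes));    // true

    // Because equals() can return true across units, hashCode() must
    // normalize the magnitude as well; see the normalized hashCode() in
    // Size above, which keeps the hashCode()/equals() contract intact.
  }
}
}</pre>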
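The Option-conversion helpers deleted from ProtobufUtil above do not disappear: the new ClusterMetricsBuilder added earlier in this patch is their intended home. The sketch below shows what a migrated call site could look like; it assumes ClusterMetricsBuilder exposes toOptions() overloads mirroring the removed ProtobufUtil signatures, which is not visible in this excerpt.
<pre>{@code
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.ClusterMetricsBuilder;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;

public class OptionConversionExample {
  public static void main(String[] args) {
    // Client-side options for a scoped getClusterStatus() call.
    EnumSet<Option> options = EnumSet.of(Option.LIVE_SERVERS, Option.MASTER);

    // Convert to the protobuf representation for the RPC request...
    List<ClusterStatusProtos.Option> protoOptions =
        ClusterMetricsBuilder.toOptions(options);

    // ...and back again; the round trip should be lossless.
    EnumSet<Option> roundTripped = ClusterMetricsBuilder.toOptions(protoOptions);
    System.out.println(roundTripped.equals(options)); // true
  }
}
}</pre>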
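Widening ReplicationLoadSink and ReplicationLoadSource to InterfaceAudience.Public (with final fields and constructors annotated IA.Private) lets clients read replication metrics straight off the cluster status. A sketch of that consumer-side use, under the assumption that ServerLoad keeps its existing getReplicationLoadSink()/getReplicationLoadSourceList() accessors and that the getters on the two replication classes match their field names:
<pre>{@code
import java.util.EnumSet;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
import org.apache.hadoop.hbase.replication.ReplicationLoadSource;

public class ReplicationMetricsExample {
  static void dumpReplicationMetrics(Admin admin) throws Exception {
    // Only live-server metrics are needed here, so scope the request.
    ClusterStatus status = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
    for (ServerName server : status.getServers()) {
      ServerLoad load = status.getLoad(server);
      ReplicationLoadSink sink = load.getReplicationLoadSink();
      System.out.println(server + " ageOfLastAppliedOp=" + sink.getAgeOfLastAppliedOp());
      for (ReplicationLoadSource source : load.getReplicationLoadSourceList()) {
        System.out.println("  peer=" + source.getPeerID()
            + " lag=" + source.getReplicationLag());
      }
    }
  }
}
}</pre>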