HBASE-22988 Backport HBASE-11062 "hbtop" to branch-1

Fixes #647

Signed-off-by: Andrew Purtell <apurtell@apache.org>
This commit is contained in:
Toshihiro Suzuki 2019-09-20 15:42:15 +09:00 committed by Andrew Purtell
parent 37e5e47faa
commit dd9eadb00f
No known key found for this signature in database
GPG Key ID: 8597754DD5365CCD
80 changed files with 10035 additions and 0 deletions

View File

@ -105,6 +105,7 @@ if [ $# = 0 ]; then
echo " pe Run PerformanceEvaluation"
echo " ltt Run LoadTestTool"
echo " canary Run the Canary tool"
echo " hbtop Run the HBTop tool"
echo " version Print the version"
echo " CLASSNAME Run the class named CLASSNAME"
exit 1
@ -402,6 +403,12 @@ elif [ "$COMMAND" = "ltt" ] ; then
elif [ "$COMMAND" = "canary" ] ; then
CLASS='org.apache.hadoop.hbase.tool.Canary'
HBASE_OPTS="$HBASE_OPTS $HBASE_CANARY_OPTS"
elif [ "$COMMAND" = "hbtop" ] ; then
CLASS='org.apache.hadoop.hbase.hbtop.HBTop'
if [ -f "${HBASE_HOME}/conf/log4j-hbtop.properties" ] ; then
HBASE_HBTOP_OPTS="${HBASE_HBTOP_OPTS} -Dlog4j.configuration=file:${HBASE_HOME}/conf/log4j-hbtop.properties"
fi
HBASE_OPTS="${HBASE_OPTS} ${HBASE_HBTOP_OPTS}"
elif [ "$COMMAND" = "version" ] ; then
CLASS='org.apache.hadoop.hbase.util.VersionInfo'
elif [ "$COMMAND" = "completebulkload" ] ; then

View File

@ -0,0 +1,27 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Root logger: only WARN and above, routed to the console appender below.
log4j.rootLogger=WARN,console
log4j.threshold=WARN

# console appender — writes to stderr (not stdout), with an ISO8601
# timestamped pattern layout.
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n

# ZooKeeper will still put stuff at WARN; raise its threshold to ERROR.
log4j.logger.org.apache.zookeeper=ERROR

View File

@ -227,5 +227,10 @@
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-hbtop</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
</project>

241
hbase-hbtop/pom.xml Normal file
View File

@ -0,0 +1,241 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <!--
  /**
   * Licensed to the Apache Software Foundation (ASF) under one
   * or more contributor license agreements.  See the NOTICE file
   * distributed with this work for additional information
   * regarding copyright ownership.  The ASF licenses this file
   * to you under the Apache License, Version 2.0 (the
   * "License"); you may not use this file except in compliance
   * with the License.  You may obtain a copy of the License at
   *
   *     http://www.apache.org/licenses/LICENSE-2.0
   *
   * Unless required by applicable law or agreed to in writing, software
   * distributed under the License is distributed on an "AS IS" BASIS,
   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   * See the License for the specific language governing permissions and
   * limitations under the License.
   */
  -->
  <modelVersion>4.0.0</modelVersion>
  <parent>
    <artifactId>hbase</artifactId>
    <groupId>org.apache.hbase</groupId>
    <version>1.5.0-SNAPSHOT</version>
    <relativePath>..</relativePath>
  </parent>
  <artifactId>hbase-hbtop</artifactId>
  <name>Apache HBase - HBTop</name>
  <description>A real-time monitoring tool for HBase like Unix's top command</description>
  <build>
    <plugins>
      <!-- Make a jar and put the sources in the jar -->
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-source-plugin</artifactId>
      </plugin>
    </plugins>
  </build>
  <dependencies>
    <dependency>
      <groupId>org.apache.commons</groupId>
      <artifactId>commons-lang3</artifactId>
    </dependency>
    <dependency>
      <groupId>com.google.protobuf</groupId>
      <artifactId>protobuf-java</artifactId>
    </dependency>
    <dependency>
      <groupId>commons-logging</groupId>
      <artifactId>commons-logging</artifactId>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-common</artifactId>
    </dependency>
    <dependency>
      <groupId>commons-cli</groupId>
      <artifactId>commons-cli</artifactId>
    </dependency>
    <dependency>
      <groupId>com.google.guava</groupId>
      <artifactId>guava</artifactId>
    </dependency>
    <dependency>
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-annotations</artifactId>
    </dependency>
    <dependency>
      <groupId>org.mockito</groupId>
      <artifactId>mockito-all</artifactId>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-annotations</artifactId>
      <type>test-jar</type>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-common</artifactId>
      <type>jar</type>
      <exclusions>
        <exclusion>
          <groupId>com.fasterxml.jackson.jaxrs</groupId>
          <artifactId>jackson-jaxrs-json-provider</artifactId>
        </exclusion>
        <exclusion>
          <groupId>com.fasterxml.jackson.core</groupId>
          <artifactId>jackson-annotations</artifactId>
        </exclusion>
        <exclusion>
          <groupId>com.fasterxml.jackson.core</groupId>
          <artifactId>jackson-core</artifactId>
        </exclusion>
        <exclusion>
          <groupId>com.fasterxml.jackson.core</groupId>
          <artifactId>jackson-databind</artifactId>
        </exclusion>
      </exclusions>
    </dependency>
    <dependency>
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-protocol</artifactId>
    </dependency>
    <dependency>
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-client</artifactId>
      <exclusions>
        <exclusion>
          <groupId>com.fasterxml.jackson.jaxrs</groupId>
          <artifactId>jackson-jaxrs-json-provider</artifactId>
        </exclusion>
        <exclusion>
          <groupId>com.fasterxml.jackson.core</groupId>
          <artifactId>jackson-annotations</artifactId>
        </exclusion>
        <exclusion>
          <groupId>com.fasterxml.jackson.core</groupId>
          <artifactId>jackson-core</artifactId>
        </exclusion>
        <exclusion>
          <groupId>com.fasterxml.jackson.core</groupId>
          <artifactId>jackson-databind</artifactId>
        </exclusion>
      </exclusions>
    </dependency>
  </dependencies>
  <profiles>
    <!-- Needs to make the profile in apache parent pom -->
    <profile>
      <id>apache-release</id>
      <build>
        <plugins>
          <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-resources-plugin</artifactId>
            <executions>
              <execution>
                <id>license-javadocs</id>
                <phase>prepare-package</phase>
                <goals>
                  <goal>copy-resources</goal>
                </goals>
                <configuration>
                  <outputDirectory>${project.build.directory}/apidocs</outputDirectory>
                  <resources>
                    <resource>
                      <directory>src/main/javadoc/META-INF/</directory>
                      <targetPath>META-INF/</targetPath>
                      <includes>
                        <include>NOTICE</include>
                      </includes>
                      <filtering>true</filtering>
                    </resource>
                  </resources>
                </configuration>
              </execution>
            </executions>
          </plugin>
        </plugins>
      </build>
    </profile>
    <!-- Skip the tests in this module -->
    <profile>
      <id>skipCommonTests</id>
      <activation>
        <property>
          <name>skipCommonTests</name>
        </property>
      </activation>
      <properties>
        <surefire.skipFirstPart>true</surefire.skipFirstPart>
      </properties>
    </profile>
    <profile>
      <id>errorProne</id>
      <activation>
        <activeByDefault>false</activeByDefault>
      </activation>
      <build>
        <plugins>
          <!-- Turn on error-prone -->
          <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-compiler-plugin</artifactId>
            <version>${maven.compiler.version}</version>
            <configuration>
              <compilerId>javac-with-errorprone</compilerId>
              <forceJavacCompilerUse>true</forceJavacCompilerUse>
              <showWarnings>true</showWarnings>
              <compilerArgs>
                <arg>-XepDisableWarningsInGeneratedCode</arg>
                <arg>-Xep:FallThrough:OFF</arg> <!-- already in findbugs -->
              </compilerArgs>
              <annotationProcessorPaths>
                <path>
                  <groupId>org.apache.hbase</groupId>
                  <artifactId>hbase-error-prone</artifactId>
                  <version>${project.version}</version>
                </path>
              </annotationProcessorPaths>
            </configuration>
            <dependencies>
              <!-- Declared exactly once; a second, duplicate declaration of
                   hbase-error-prone was removed (Maven warns on duplicate
                   dependency declarations and ignores the later one). -->
              <dependency>
                <groupId>org.apache.hbase</groupId>
                <artifactId>hbase-error-prone</artifactId>
                <version>${project.version}</version>
              </dependency>
              <dependency>
                <groupId>org.codehaus.plexus</groupId>
                <artifactId>plexus-compiler-javac-errorprone</artifactId>
                <version>${plexus.errorprone.javac.version}</version>
              </dependency>
              <!-- override plexus-compiler-javac-errorprone's dependency on
                   Error Prone with the latest version -->
              <dependency>
                <groupId>com.google.errorprone</groupId>
                <artifactId>error_prone_core</artifactId>
                <version>${error-prone.version}</version>
              </dependency>
            </dependencies>
          </plugin>
        </plugins>
      </build>
    </profile>
  </profiles>
</project>

View File

@ -0,0 +1,140 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop;
import java.util.Objects;
import org.apache.commons.cli.BasicParser;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.mode.Mode;
import org.apache.hadoop.hbase.hbtop.screen.Screen;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
 * A real-time monitoring tool for HBase like Unix top command.
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
public class HBTop extends Configured implements Tool {

  private static final Log LOG = LogFactory.getLog(HBTop.class);

  /** Creates the tool with a freshly loaded HBase configuration. */
  public HBTop() {
    this(HBaseConfiguration.create());
  }

  /**
   * Creates the tool with the given configuration.
   *
   * @param conf the configuration to use; must not be null
   */
  public HBTop(Configuration conf) {
    super(Objects.requireNonNull(conf));
  }

  /**
   * Parses the command line and launches the top screen.
   *
   * @param args command line arguments (-h/--help, -d/--delay, -m/--mode)
   * @return 0 on normal exit, 1 when option parsing fails
   */
  @Override
  public int run(String[] args) throws Exception {
    // Defaults: refresh every 3 seconds, start in Region mode.
    long refreshDelayMillis = 3 * 1000;
    Mode startMode = Mode.REGION;

    try {
      Options options = new Options();
      options.addOption("h", "help", false,
        "Print usage; for help while the tool is running press 'h'");
      options.addOption("d", "delay", true,
        "The refresh delay (in seconds); default is 3 seconds");
      options.addOption("m", "mode", true,
        "The mode; n (Namespace)|t (Table)|r (Region)|s (RegionServer)"
          + ", default is r (Region)");

      CommandLine cmdLine = new BasicParser().parse(options, args);

      if (cmdLine.hasOption("help")) {
        printUsage(options);
        return 0;
      }

      if (cmdLine.hasOption("delay")) {
        int seconds = 0;
        try {
          seconds = Integer.parseInt(cmdLine.getOptionValue("delay"));
        } catch (NumberFormatException ignored) {
          // seconds stays 0 and is rejected below
        }
        if (seconds >= 1) {
          refreshDelayMillis = seconds * 1000L;
        } else {
          LOG.warn("Delay set too low or invalid, using default");
        }
      }

      if (cmdLine.hasOption("mode")) {
        String modeValue = cmdLine.getOptionValue("mode");
        // equals() chain mirrors the original string switch, including its
        // NullPointerException on a null value (caught by the outer catch)
        if (modeValue.equals("n")) {
          startMode = Mode.NAMESPACE;
        } else if (modeValue.equals("t")) {
          startMode = Mode.TABLE;
        } else if (modeValue.equals("r")) {
          startMode = Mode.REGION;
        } else if (modeValue.equals("s")) {
          startMode = Mode.REGION_SERVER;
        } else {
          LOG.warn("Mode set invalid, using default");
        }
      }
    } catch (Exception e) {
      LOG.error("Unable to parse options", e);
      return 1;
    }

    // Screen is AutoCloseable; run until the user quits, then restore the terminal.
    try (Screen screen = new Screen(getConf(), refreshDelayMillis, startMode)) {
      screen.run();
    }
    return 0;
  }

  /** Prints command line usage plus notes about -D configuration overrides. */
  private void printUsage(Options opts) {
    new HelpFormatter().printHelp("hbase hbtop [opts] [-D<property=value>]*", opts);
    for (String line : new String[] {
        "",
        " Note: -D properties will be applied to the conf used.",
        " For example:",
        " -Dhbase.client.zookeeper.quorum=<zookeeper quorum>",
        " -Dzookeeper.znode.parent=<znode parent>",
        "" }) {
      System.out.println(line);
    }
  }

  /** Command line entry point. */
  public static void main(String[] args) throws Exception {
    System.exit(ToolRunner.run(new HBTop(), args));
  }
}

View File

@ -0,0 +1,185 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop;
import com.google.common.collect.ImmutableMap;
import edu.umd.cs.findbugs.annotations.NonNull;
import java.util.AbstractMap;
import java.util.Collection;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.hbtop.field.FieldValue;
import org.apache.hadoop.hbase.hbtop.field.FieldValueType;
/**
 * Represents a record of the metrics in the top screen.
 *
 * Implemented as an immutable {@link Map} from {@link Field} to {@link FieldValue}:
 * all read operations delegate to a Guava {@link ImmutableMap}, and all mutating
 * {@link Map} operations throw {@link UnsupportedOperationException}.
 */
@InterfaceAudience.Private
public final class Record implements Map<Field, FieldValue> {

  // Immutable backing map holding every field value of this record.
  private final ImmutableMap<Field, FieldValue> values;

  /**
   * An immutable (field, value) pair; created only through {@link Record#entry}.
   */
  public final static class Entry extends AbstractMap.SimpleImmutableEntry<Field, FieldValue> {
    private Entry(Field key, FieldValue value) {
      super(key, value);
    }
  }

  /**
   * Builder for {@link Record} instances. Obtained via {@link Record#builder()}.
   *
   * NOTE(review): the underlying ImmutableMap.Builder rejects duplicate keys at
   * build() time — confirm callers never put the same Field twice.
   */
  public final static class Builder {
    private final ImmutableMap.Builder<Field, FieldValue> builder;

    private Builder() {
      builder = ImmutableMap.builder();
    }

    /**
     * Adds a raw value, converting it to a {@link FieldValue} with
     * {@link Field#newValue(Object)}.
     */
    public Builder put(Field key, Object value) {
      builder.put(key, key.newValue(value));
      return this;
    }

    /** Adds an already-typed {@link FieldValue} for the given key. */
    public Builder put(Field key, FieldValue value) {
      builder.put(key, value);
      return this;
    }

    /** Adds a pre-built {@link Entry}. */
    public Builder put(Entry entry) {
      builder.put(entry);
      return this;
    }

    /** Adds every mapping from the given map. */
    public Builder putAll(Map<Field, FieldValue> map) {
      builder.putAll(map);
      return this;
    }

    /** @return the finished immutable {@link Record} */
    public Record build() {
      return new Record(builder.build());
    }
  }

  /** @return a new empty {@link Builder} */
  public static Builder builder() {
    return new Builder();
  }

  /** Creates an entry, converting the raw value via {@link Field#newValue(Object)}. */
  public static Entry entry(Field field, Object value) {
    return new Entry(field, field.newValue(value));
  }

  /** Creates an entry from an already-typed {@link FieldValue}. */
  public static Entry entry(Field field, FieldValue value) {
    return new Entry(field, value);
  }

  /** Builds a {@link Record} from the given entries. */
  public static Record ofEntries(Entry... entries) {
    Builder builder = builder();
    for (Entry entry : entries) {
      builder.put(entry.getKey(), entry.getValue());
    }
    return builder.build();
  }

  /** Builds a {@link Record} from the given entries. */
  public static Record ofEntries(Iterable<Entry> entries) {
    Builder builder = builder();
    for (Entry entry : entries) {
      builder.put(entry.getKey(), entry.getValue());
    }
    return builder.build();
  }

  // Private: instances are created only through the Builder / ofEntries factories.
  private Record(ImmutableMap<Field, FieldValue> values) {
    this.values = values;
  }

  // ---- read-only Map operations: straight delegation to the backing map ----

  @Override
  public int size() {
    return values.size();
  }

  @Override
  public boolean isEmpty() {
    return values.isEmpty();
  }

  @Override
  public boolean containsKey(Object key) {
    return values.containsKey(key);
  }

  @Override
  public boolean containsValue(Object value) {
    return values.containsValue(value);
  }

  @Override
  public FieldValue get(Object key) {
    return values.get(key);
  }

  // ---- mutating Map operations: unsupported, Record is immutable ----

  @Override
  public FieldValue put(Field key, FieldValue value) {
    throw new UnsupportedOperationException();
  }

  @Override
  public FieldValue remove(Object key) {
    throw new UnsupportedOperationException();
  }

  @Override
  public void putAll(@NonNull Map<? extends Field, ? extends FieldValue> m) {
    throw new UnsupportedOperationException();
  }

  @Override
  public void clear() {
    throw new UnsupportedOperationException();
  }

  // ---- view collections: immutable views from the backing ImmutableMap ----

  @Override
  @NonNull
  public Set<Field> keySet() {
    return values.keySet();
  }

  @Override
  @NonNull
  public Collection<FieldValue> values() {
    return values.values();
  }

  @Override
  @NonNull
  public Set<Map.Entry<Field, FieldValue>> entrySet() {
    return values.entrySet();
  }

  /**
   * Merges this record with another: STRING-typed fields keep this record's value,
   * every other field becomes the sum ({@code plus}) of the two values.
   *
   * NOTE(review): assumes {@code o} contains every non-STRING key present here;
   * confirm callers guarantee this before relying on it.
   */
  public Record combine(Record o) {
    Builder builder = builder();
    for (Field k : values.keySet()) {
      if (k.getFieldValueType() == FieldValueType.STRING) {
        builder.put(k, values.get(k));
      } else {
        builder.put(k, values.get(k).plus(o.values.get(k)));
      }
    }
    return builder.build();
  }
}

View File

@ -0,0 +1,339 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.hbtop.field.FieldValue;
/**
 * Represents a filter that's filtering the metric {@link Record}s.
 *
 * A filter has the form {@code [!]<field><operator><value>} (e.g. {@code #REQ/S>100}).
 * A leading {@code !} negates the result. Filters are parsed with {@link #parse} or
 * built programmatically with {@link #newBuilder}.
 */
@InterfaceAudience.Private
public final class RecordFilter {

  // Comparison operators recognized in a filter string. toString() returns the
  // literal used in the string form, which parse() matches against exactly.
  private enum Operator {
    EQUAL("="),
    DOUBLE_EQUALS("=="),
    GREATER(">"),
    GREATER_OR_EQUAL(">="),
    LESS("<"),
    LESS_OR_EQUAL("<=");

    private final String operator;

    Operator(String operator) {
      this.operator = operator;
    }

    @Override
    public String toString() {
      return operator;
    }
  }

  /** Parses a filter string, accepting any {@link Field}. */
  public static RecordFilter parse(String filterString, boolean ignoreCase) {
    return parse(filterString, Arrays.asList(Field.values()), ignoreCase);
  }

  /*
   * Parse a filter string and build a RecordFilter instance.
   *
   * Scans left to right: optional leading '!', then the field header (everything up
   * to the first '<', '>' or '='), then the operator characters, then the value.
   * Returns null for any malformed input (unknown field, unknown operator, missing
   * value, or a value the field cannot parse).
   */
  public static RecordFilter parse(String filterString, List<Field> fields, boolean ignoreCase) {
    int index = 0;

    // Optional negation prefix.
    boolean not = isNot(filterString);
    if (not) {
      index += 1;
    }

    // Field header: consume until the first operator character.
    StringBuilder fieldString = new StringBuilder();
    while (filterString.length() > index && filterString.charAt(index) != '<'
      && filterString.charAt(index) != '>' && filterString.charAt(index) != '=') {
      fieldString.append(filterString.charAt(index++));
    }

    // Reject empty field name or a string with no operator at all.
    if (fieldString.length() == 0 || filterString.length() == index) {
      return null;
    }

    Field field = getField(fields, fieldString.toString());
    if (field == null) {
      return null;
    }

    // Operator: consume the run of '<', '>' and '=' characters.
    StringBuilder operatorString = new StringBuilder();
    while (filterString.length() > index && (filterString.charAt(index) == '<' ||
      filterString.charAt(index) == '>' || filterString.charAt(index) == '=')) {
      operatorString.append(filterString.charAt(index++));
    }

    Operator operator = getOperator(operatorString.toString());
    if (operator == null) {
      return null;
    }

    // The remainder of the string is the comparison value.
    String value = filterString.substring(index);
    FieldValue fieldValue = getFieldValue(field, value);
    if (fieldValue == null) {
      return null;
    }

    return new RecordFilter(ignoreCase, not, field, operator, fieldValue);
  }

  // Converts the raw value string; any conversion failure means "unparsable filter".
  private static FieldValue getFieldValue(Field field, String value) {
    try {
      return field.newValue(value);
    } catch (Exception e) {
      return null;
    }
  }

  // True when the filter string carries the '!' negation prefix.
  private static boolean isNot(String filterString) {
    return filterString.startsWith("!");
  }

  // Looks up a Field by its header text (case-sensitive); null if not found.
  private static Field getField(List<Field> fields, String fieldString) {
    for (Field f : fields) {
      if (f.getHeader().equals(fieldString)) {
        return f;
      }
    }
    return null;
  }

  // Maps an operator literal (e.g. ">=") to the Operator constant; null if unknown.
  private static Operator getOperator(String operatorString) {
    for (Operator o : Operator.values()) {
      if (operatorString.equals(o.toString())) {
        return o;
      }
    }
    return null;
  }

  private final boolean ignoreCase;  // compare values case-insensitively
  private final boolean not;         // negate the result of the comparison
  private final Field field;         // field whose value is tested
  private final Operator operator;   // comparison operator
  private final FieldValue value;    // right-hand side of the comparison

  private RecordFilter(boolean ignoreCase, boolean not, Field field, Operator operator,
    FieldValue value) {
    this.ignoreCase = ignoreCase;
    this.not = not;
    this.field = Objects.requireNonNull(field);
    this.operator = Objects.requireNonNull(operator);
    this.value = Objects.requireNonNull(value);
  }

  /**
   * Applies this filter to a record.
   *
   * '=' is a substring-containment match on the string form; all other operators
   * use FieldValue comparison. Records missing the field never match.
   *
   * @return true when the record passes the filter (after negation is applied)
   */
  public boolean execute(Record record) {
    FieldValue fieldValue = record.get(field);
    if (fieldValue == null) {
      return false;
    }

    if (operator == Operator.EQUAL) {
      boolean ret;
      if (ignoreCase) {
        ret = fieldValue.asString().toLowerCase().contains(value.asString().toLowerCase());
      } else {
        ret = fieldValue.asString().contains(value.asString());
      }
      // "not != ret" XORs the negation flag into the result.
      return not != ret;
    }

    int compare = ignoreCase ?
      fieldValue.compareToIgnoreCase(value) : fieldValue.compareTo(value);

    boolean ret;
    switch (operator) {
      case DOUBLE_EQUALS:
        ret = compare == 0;
        break;

      case GREATER:
        ret = compare > 0;
        break;

      case GREATER_OR_EQUAL:
        ret = compare >= 0;
        break;

      case LESS:
        ret = compare < 0;
        break;

      case LESS_OR_EQUAL:
        ret = compare <= 0;
        break;

      default:
        // EQUAL was handled above; any other value is impossible.
        throw new AssertionError();
    }
    return not != ret;
  }

  // Renders the filter back into its parseable string form.
  @Override
  public String toString() {
    return (not ? "!" : "") + field.getHeader() + operator + value.asString();
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof RecordFilter)) {
      return false;
    }
    RecordFilter filter = (RecordFilter) o;
    return ignoreCase == filter.ignoreCase && not == filter.not && field == filter.field
      && operator == filter.operator && value.equals(filter.value);
  }

  @Override
  public int hashCode() {
    return Objects.hash(ignoreCase, not, field, operator, value);
  }

  /*
   * For FilterBuilder
   */
  public static FilterBuilder newBuilder(Field field) {
    return new FilterBuilder(field, false);
  }

  public static FilterBuilder newBuilder(Field field, boolean ignoreCase) {
    return new FilterBuilder(field, ignoreCase);
  }

  /**
   * Fluent builder that creates a {@link RecordFilter} for one field; each method
   * pairs an operator with an optional negation. {@code Object} overloads convert
   * the value via {@link Field#newValue(Object)}.
   */
  public static final class FilterBuilder {
    private final Field field;
    private final boolean ignoreCase;

    private FilterBuilder(Field field, boolean ignoreCase) {
      this.field = Objects.requireNonNull(field);
      this.ignoreCase = ignoreCase;
    }

    public RecordFilter equal(FieldValue value) {
      return newFilter(false, Operator.EQUAL, value);
    }

    public RecordFilter equal(Object value) {
      return equal(field.newValue(value));
    }

    public RecordFilter notEqual(FieldValue value) {
      return newFilter(true, Operator.EQUAL, value);
    }

    public RecordFilter notEqual(Object value) {
      return notEqual(field.newValue(value));
    }

    public RecordFilter doubleEquals(FieldValue value) {
      return newFilter(false, Operator.DOUBLE_EQUALS, value);
    }

    public RecordFilter doubleEquals(Object value) {
      return doubleEquals(field.newValue(value));
    }

    public RecordFilter notDoubleEquals(FieldValue value) {
      return newFilter(true, Operator.DOUBLE_EQUALS, value);
    }

    public RecordFilter notDoubleEquals(Object value) {
      return notDoubleEquals(field.newValue(value));
    }

    public RecordFilter greater(FieldValue value) {
      return newFilter(false, Operator.GREATER, value);
    }

    public RecordFilter greater(Object value) {
      return greater(field.newValue(value));
    }

    public RecordFilter notGreater(FieldValue value) {
      return newFilter(true, Operator.GREATER, value);
    }

    public RecordFilter notGreater(Object value) {
      return notGreater(field.newValue(value));
    }

    public RecordFilter greaterOrEqual(FieldValue value) {
      return newFilter(false, Operator.GREATER_OR_EQUAL, value);
    }

    public RecordFilter greaterOrEqual(Object value) {
      return greaterOrEqual(field.newValue(value));
    }

    public RecordFilter notGreaterOrEqual(FieldValue value) {
      return newFilter(true, Operator.GREATER_OR_EQUAL, value);
    }

    public RecordFilter notGreaterOrEqual(Object value) {
      return notGreaterOrEqual(field.newValue(value));
    }

    public RecordFilter less(FieldValue value) {
      return newFilter(false, Operator.LESS, value);
    }

    public RecordFilter less(Object value) {
      return less(field.newValue(value));
    }

    public RecordFilter notLess(FieldValue value) {
      return newFilter(true, Operator.LESS, value);
    }

    public RecordFilter notLess(Object value) {
      return notLess(field.newValue(value));
    }

    public RecordFilter lessOrEqual(FieldValue value) {
      return newFilter(false, Operator.LESS_OR_EQUAL, value);
    }

    public RecordFilter lessOrEqual(Object value) {
      return lessOrEqual(field.newValue(value));
    }

    public RecordFilter notLessOrEqual(FieldValue value) {
      return newFilter(true, Operator.LESS_OR_EQUAL, value);
    }

    public RecordFilter notLessOrEqual(Object value) {
      return notLessOrEqual(field.newValue(value));
    }

    private RecordFilter newFilter(boolean not, Operator operator, FieldValue value) {
      return new RecordFilter(ignoreCase, not, field, operator, value);
    }
  }
}

View File

@ -0,0 +1,98 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.field;
import java.util.Objects;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
 * Represents fields that are displayed in the top screen.
 *
 * Each constant carries its display metadata; the constructor arguments are
 * (header, description, autoAdjust, leftJustify, valueType).
 */
@InterfaceAudience.Private
public enum Field {
  // NOTE: constant order fixes enum ordinals; do not reorder casually.
  REGION_NAME("RNAME", "Region Name", true, true, FieldValueType.STRING),
  NAMESPACE("NAMESPACE", "Namespace Name", true, true, FieldValueType.STRING),
  TABLE("TABLE", "Table Name", true, true, FieldValueType.STRING),
  START_CODE("SCODE", "Start Code", false, true, FieldValueType.STRING),
  REPLICA_ID("REPID", "Replica ID", false, false, FieldValueType.STRING),
  REGION("REGION", "Encoded Region Name", false, true, FieldValueType.STRING),
  REGION_SERVER("RS", "Short Region Server Name", true, true, FieldValueType.STRING),
  LONG_REGION_SERVER("LRS", "Long Region Server Name", true, true, FieldValueType.STRING),
  REQUEST_COUNT_PER_SECOND("#REQ/S", "Request Count per second", false, false,
    FieldValueType.LONG),
  READ_REQUEST_COUNT_PER_SECOND("#READ/S", "Read Request Count per second", false, false,
    FieldValueType.LONG),
  WRITE_REQUEST_COUNT_PER_SECOND("#WRITE/S", "Write Request Count per second", false, false,
    FieldValueType.LONG),
  STORE_FILE_SIZE("SF", "StoreFile Size", false, false, FieldValueType.SIZE),
  UNCOMPRESSED_STORE_FILE_SIZE("USF", "Uncompressed StoreFile Size", false, false,
    FieldValueType.SIZE),
  NUM_STORE_FILES("#SF", "Number of StoreFiles", false, false, FieldValueType.INTEGER),
  MEM_STORE_SIZE("MEMSTORE", "MemStore Size", false, false, FieldValueType.SIZE),
  LOCALITY("LOCALITY", "Block Locality", false, false, FieldValueType.FLOAT),
  START_KEY("SKEY", "Start Key", true, true, FieldValueType.STRING),
  COMPACTING_CELL_COUNT("#COMPingCELL", "Compacting Cell Count", false, false,
    FieldValueType.LONG),
  COMPACTED_CELL_COUNT("#COMPedCELL", "Compacted Cell Count", false, false, FieldValueType.LONG),
  COMPACTION_PROGRESS("%COMP", "Compaction Progress", false, false, FieldValueType.PERCENT),
  LAST_MAJOR_COMPACTION_TIME("LASTMCOMP", "Last Major Compaction Time", false, true,
    FieldValueType.STRING),
  REGION_COUNT("#REGION", "Region Count", false, false, FieldValueType.INTEGER),
  USED_HEAP_SIZE("UHEAP", "Used Heap Size", false, false, FieldValueType.SIZE),
  MAX_HEAP_SIZE("MHEAP", "Max Heap Size", false, false, FieldValueType.SIZE);

  private final String header;                 // short column header shown on screen
  private final String description;            // human-readable description
  private final boolean autoAdjust;            // column width auto-adjusts to content
  private final boolean leftJustify;           // left- (vs right-) justified display
  private final FieldValueType fieldValueType; // value type used by FieldValue conversion

  Field(String header, String description, boolean autoAdjust, boolean leftJustify,
    FieldValueType fieldValueType) {
    this.header = Objects.requireNonNull(header);
    this.description = Objects.requireNonNull(description);
    this.autoAdjust = autoAdjust;
    this.leftJustify = leftJustify;
    this.fieldValueType = Objects.requireNonNull(fieldValueType);
  }

  /** Wraps a raw value in a {@link FieldValue} of this field's type. */
  public FieldValue newValue(Object value) {
    return new FieldValue(value, fieldValueType);
  }

  /** @return the short column header (e.g. {@code "RNAME"}) */
  public String getHeader() {
    return header;
  }

  /** @return the human-readable description of the field */
  public String getDescription() {
    return description;
  }

  /** @return true if the column width auto-adjusts to its content */
  public boolean isAutoAdjust() {
    return autoAdjust;
  }

  /** @return true if the field is displayed left-justified */
  public boolean isLeftJustify() {
    return leftJustify;
  }

  /** @return the value type of this field */
  public FieldValueType getFieldValueType() {
    return fieldValueType;
  }
}

View File

@ -0,0 +1,55 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.field;
import java.util.Objects;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
 * Information about a field.
 *
 * This has a {@link Field} itself and additional information (e.g. {@code defaultLength} and
 * {@code displayByDefault}). This additional information is different between the
 * {@link org.apache.hadoop.hbase.hbtop.mode.Mode}s even when the field is the same. That's why
 * the additional information is separated from {@link Field}.
 */
@InterfaceAudience.Private
public class FieldInfo {

  private final Field targetField;      // the field this metadata describes
  private final int initialLength;      // default display width of the column
  private final boolean shownByDefault; // whether the column is visible initially

  /**
   * @param field            the field described; must not be null
   * @param defaultLength    default display length of the column
   * @param displayByDefault whether the field is shown without user action
   */
  public FieldInfo(Field field, int defaultLength, boolean displayByDefault) {
    this.targetField = Objects.requireNonNull(field);
    this.initialLength = defaultLength;
    this.shownByDefault = displayByDefault;
  }

  /** @return the {@link Field} this info describes */
  public Field getField() {
    return targetField;
  }

  /** @return the default display length of the field */
  public int getDefaultLength() {
    return initialLength;
  }

  /** @return true if the field is displayed by default */
  public boolean isDisplayByDefault() {
    return shownByDefault;
  }
}

View File

@ -0,0 +1,283 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.field;
import edu.umd.cs.findbugs.annotations.NonNull;
import java.util.Objects;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
* Represents a value of a field.
*
* The type of a value is defined by {@link FieldValueType}.
*/
@InterfaceAudience.Private
public final class FieldValue implements Comparable<FieldValue> {

  // The wrapped value. Its runtime class is fixed by the constructor according to `type`:
  // STRING -> String, INTEGER -> Integer, LONG -> Long, FLOAT/PERCENT -> Float, SIZE -> Size.
  private final Object value;
  private final FieldValueType type;

  /**
   * Creates a value of the given type.
   *
   * Accepts either an instance of the matching Java type or a {@link String} that can be
   * parsed into it (STRING accepts only a String).
   *
   * @param value the raw value, never null
   * @param type the type of the value, never null
   * @throws IllegalArgumentException if {@code value} is not of, nor parsable into, {@code type}
   */
  FieldValue(Object value, FieldValueType type) {
    Objects.requireNonNull(value);
    this.type = Objects.requireNonNull(type);

    switch (type) {
      case STRING:
        if (value instanceof String) {
          this.value = value;
          break;
        }
        throw new IllegalArgumentException("invalid type");
      case INTEGER:
        if (value instanceof Integer) {
          this.value = value;
          break;
        } else if (value instanceof String) {
          this.value = Integer.valueOf((String) value);
          break;
        }
        throw new IllegalArgumentException("invalid type");
      case LONG:
        if (value instanceof Long) {
          this.value = value;
          break;
        } else if (value instanceof String) {
          this.value = Long.valueOf((String) value);
          break;
        }
        throw new IllegalArgumentException("invalid type");
      case FLOAT:
        if (value instanceof Float) {
          this.value = value;
          break;
        } else if (value instanceof String) {
          this.value = Float.valueOf((String) value);
          break;
        }
        throw new IllegalArgumentException("invalid type");
      case SIZE:
        if (value instanceof Size) {
          this.value = optimizeSize((Size) value);
          break;
        } else if (value instanceof String) {
          this.value = optimizeSize(parseSizeString((String) value));
          break;
        }
        throw new IllegalArgumentException("invalid type");
      case PERCENT:
        if (value instanceof Float) {
          this.value = value;
          break;
        } else if (value instanceof String) {
          this.value = parsePercentString((String) value);
          break;
        }
        throw new IllegalArgumentException("invalid type");
      default:
        throw new AssertionError();
    }
  }

  // Re-scales a size to the largest unit whose scaled value stays below 1024 so that it
  // displays compactly (e.g. 2048KB becomes 2MB).
  private Size optimizeSize(Size size) {
    if (size.get(Size.Unit.BYTE) < 1024d) {
      return size.getUnit() == Size.Unit.BYTE ?
        size : new Size(size.get(Size.Unit.BYTE), Size.Unit.BYTE);
    } else if (size.get(Size.Unit.KILOBYTE) < 1024d) {
      return size.getUnit() == Size.Unit.KILOBYTE ?
        size : new Size(size.get(Size.Unit.KILOBYTE), Size.Unit.KILOBYTE);
    } else if (size.get(Size.Unit.MEGABYTE) < 1024d) {
      return size.getUnit() == Size.Unit.MEGABYTE ?
        size : new Size(size.get(Size.Unit.MEGABYTE), Size.Unit.MEGABYTE);
    } else if (size.get(Size.Unit.GIGABYTE) < 1024d) {
      return size.getUnit() == Size.Unit.GIGABYTE ?
        size : new Size(size.get(Size.Unit.GIGABYTE), Size.Unit.GIGABYTE);
    } else if (size.get(Size.Unit.TERABYTE) < 1024d) {
      return size.getUnit() == Size.Unit.TERABYTE ?
        size : new Size(size.get(Size.Unit.TERABYTE), Size.Unit.TERABYTE);
    }
    return size.getUnit() == Size.Unit.PETABYTE ?
      size : new Size(size.get(Size.Unit.PETABYTE), Size.Unit.PETABYTE);
  }

  /**
   * Parses a size string such as "100MB" or "512B".
   *
   * The unit suffix is matched against {@link Size.Unit#getSimpleName()}. Units are tried in
   * declaration order ("PB".."KB" before "B"), so two-character suffixes win over the
   * one-character "B" suffix. This also accepts plain byte values (e.g. "512B"), which the
   * previous fixed two-character suffix parsing wrongly rejected.
   *
   * @throws IllegalArgumentException if no declared unit suffix matches
   * @throws NumberFormatException if the numeric part is not a valid double
   */
  private Size parseSizeString(String sizeString) {
    for (Size.Unit unit : Size.Unit.values()) {
      String simpleName = unit.getSimpleName();
      if (sizeString.length() > simpleName.length() && sizeString.endsWith(simpleName)) {
        String valueString = sizeString.substring(0, sizeString.length() - simpleName.length());
        return new Size(Double.parseDouble(valueString), unit);
      }
    }
    throw new IllegalArgumentException("invalid size");
  }

  // Parses a percent string; a trailing '%' is optional ("85.5%" or "85.5").
  private Float parsePercentString(String percentString) {
    if (percentString.endsWith("%")) {
      percentString = percentString.substring(0, percentString.length() - 1);
    }
    return Float.valueOf(percentString);
  }

  /** @return the display string of this value (same as {@link #toString()}) */
  public String asString() {
    return toString();
  }

  /** @return the value as int; only valid for INTEGER values */
  public int asInt() {
    return (Integer) value;
  }

  /** @return the value as long; only valid for LONG values */
  public long asLong() {
    return (Long) value;
  }

  /** @return the value as float; only valid for FLOAT/PERCENT values */
  public float asFloat() {
    return (Float) value;
  }

  /** @return the value as Size; only valid for SIZE values */
  public Size asSize() {
    return (Size) value;
  }

  @Override
  public String toString() {
    switch (type) {
      case STRING:
      case INTEGER:
      case LONG:
      case FLOAT:
      case SIZE:
        return value.toString();
      case PERCENT:
        // Percent values are rendered with two decimals and a trailing '%'.
        return String.format("%.2f", (Float) value) + "%";
      default:
        throw new AssertionError();
    }
  }

  /**
   * Compares by the underlying typed value.
   *
   * @throws IllegalArgumentException if the two values have different types
   */
  @Override
  public int compareTo(@NonNull FieldValue o) {
    if (type != o.type) {
      throw new IllegalArgumentException("invalid type");
    }

    switch (type) {
      case STRING:
        return ((String) value).compareTo((String) o.value);
      case INTEGER:
        return ((Integer) value).compareTo((Integer) o.value);
      case LONG:
        return ((Long) value).compareTo((Long) o.value);
      case FLOAT:
      case PERCENT:
        return ((Float) value).compareTo((Float) o.value);
      case SIZE:
        return ((Size) value).compareTo((Size) o.value);
      default:
        throw new AssertionError();
    }
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof FieldValue)) {
      return false;
    }
    FieldValue that = (FieldValue) o;
    return value.equals(that.value) && type == that.type;
  }

  @Override
  public int hashCode() {
    return Objects.hash(value, type);
  }

  /**
   * Adds another value of the same type to this one and returns the result as a new
   * FieldValue (STRING values are concatenated; SIZE results keep this value's unit).
   *
   * @throws IllegalArgumentException if the two values have different types
   */
  public FieldValue plus(FieldValue o) {
    if (type != o.type) {
      throw new IllegalArgumentException("invalid type");
    }

    switch (type) {
      case STRING:
        return new FieldValue(((String) value).concat((String) o.value), type);
      case INTEGER:
        return new FieldValue(((Integer) value) + ((Integer) o.value), type);
      case LONG:
        return new FieldValue(((Long) value) + ((Long) o.value), type);
      case FLOAT:
      case PERCENT:
        return new FieldValue(((Float) value) + ((Float) o.value), type);
      case SIZE:
        Size size = (Size) value;
        Size oSize = (Size) o.value;
        // Convert the other operand into this value's unit before adding.
        Size.Unit unit = size.getUnit();
        return new FieldValue(new Size(size.get(unit) + oSize.get(unit), unit), type);
      default:
        throw new AssertionError();
    }
  }

  /**
   * Like {@link #compareTo(FieldValue)} but case-insensitive for STRING values.
   *
   * @throws IllegalArgumentException if the two values have different types
   */
  public int compareToIgnoreCase(FieldValue o) {
    if (type != o.type) {
      throw new IllegalArgumentException("invalid type");
    }

    switch (type) {
      case STRING:
        return ((String) value).compareToIgnoreCase((String) o.value);
      case INTEGER:
      case LONG:
      case FLOAT:
      case SIZE:
      case PERCENT:
        return compareTo(o);
      default:
        throw new AssertionError();
    }
  }
}

View File

@ -0,0 +1,28 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.field;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
* Represents the type of a {@link FieldValue}.
*/
@InterfaceAudience.Private
public enum FieldValueType {
  // Runtime representation per type (as enforced by the FieldValue constructor):
  // STRING -> String, INTEGER -> Integer, LONG -> Long, FLOAT -> Float,
  // SIZE -> Size, PERCENT -> Float (rendered with a trailing '%').
  STRING, INTEGER, LONG, FLOAT, SIZE, PERCENT
}

View File

@ -0,0 +1,157 @@
/**
* Copyright The Apache Software Foundation
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.field;
import java.math.BigDecimal;
import java.util.Objects;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
* It is used to represent the size with different units.
* This class doesn't serve for the precise computation.
*/
@InterfaceAudience.Private
public final class Size implements Comparable<Size> {

  public static final Size ZERO = new Size(0, Unit.KILOBYTE);

  // 1024 as BigDecimal: the conversion factor between two adjacent units.
  private static final BigDecimal SCALE_BASE = BigDecimal.valueOf(1024D);

  public enum Unit {
    // keep the room to add more units for HBase 10.x
    PETABYTE(100, "PB"),
    TERABYTE(99, "TB"),
    GIGABYTE(98, "GB"),
    MEGABYTE(97, "MB"),
    KILOBYTE(96, "KB"),
    BYTE(95, "B");

    // Larger number means larger unit; the difference between two orders is the number of
    // factor-1024 steps separating the units.
    private final int orderOfSize;
    private final String simpleName;

    Unit(int orderOfSize, String simpleName) {
      this.orderOfSize = orderOfSize;
      this.simpleName = simpleName;
    }

    public int getOrderOfSize() {
      return orderOfSize;
    }

    public String getSimpleName() {
      return simpleName;
    }
  }

  private final double value;
  private final Unit unit;

  /**
   * @param value the magnitude; must be non-negative
   * @param unit the unit of the magnitude
   * @throws IllegalArgumentException if {@code value} is negative
   */
  public Size(double value, Unit unit) {
    if (value < 0) {
      throw new IllegalArgumentException("The value:" + value + " can't be negative");
    }
    this.value = value;
    this.unit = unit;
  }

  /**
   * @return size unit
   */
  public Unit getUnit() {
    return unit;
  }

  /**
   * get the value, truncated to a long
   */
  public long getLongValue() {
    return (long) value;
  }

  /**
   * get the value
   */
  public double get() {
    return value;
  }

  /**
   * get the value which is converted to specified unit.
   *
   * @param unit size unit
   * @return the converted value
   */
  public double get(Unit unit) {
    if (value == 0) {
      return value;
    }
    int diff = this.unit.getOrderOfSize() - unit.getOrderOfSize();
    if (diff == 0) {
      return value;
    }

    BigDecimal rval = BigDecimal.valueOf(value);
    // One multiply/divide by 1024 per unit step. Division by 1024 (a power of two) always
    // terminates in decimal, so BigDecimal.divide cannot throw ArithmeticException here.
    for (int i = 0; i != Math.abs(diff); ++i) {
      rval = diff > 0 ? rval.multiply(SCALE_BASE) : rval.divide(SCALE_BASE);
    }
    return rval.doubleValue();
  }

  /**
   * Compares numerically, regardless of unit: the value expressed in the larger unit is scaled
   * up (multiplication only, so no precision loss) before comparing. BigDecimal.compareTo
   * ignores scale, so 1.0 and 1.00 compare equal.
   */
  @Override
  public int compareTo(Size other) {
    int diff = unit.getOrderOfSize() - other.unit.getOrderOfSize();
    if (diff == 0) {
      return Double.compare(value, other.value);
    }

    BigDecimal thisValue = BigDecimal.valueOf(value);
    BigDecimal otherValue = BigDecimal.valueOf(other.value);
    if (diff > 0) {
      for (int i = 0; i != Math.abs(diff); ++i) {
        thisValue = thisValue.multiply(SCALE_BASE);
      }
    } else {
      for (int i = 0; i != Math.abs(diff); ++i) {
        otherValue = otherValue.multiply(SCALE_BASE);
      }
    }
    return thisValue.compareTo(otherValue);
  }

  @Override
  public String toString() {
    return value + unit.getSimpleName();
  }

  /**
   * Equality is numeric and unit-insensitive, consistent with {@link #compareTo(Size)}:
   * e.g. {@code new Size(1, Unit.MEGABYTE)} equals {@code new Size(1024, Unit.KILOBYTE)}.
   */
  @Override
  public boolean equals(Object obj) {
    if (obj == null) {
      return false;
    }
    if (obj == this) {
      return true;
    }
    if (obj instanceof Size) {
      return compareTo((Size) obj) == 0;
    }
    return false;
  }

  @Override
  public int hashCode() {
    // equals() treats sizes representing the same amount as equal regardless of unit, so the
    // hash must be unit-independent too; hashing (value, unit) would break the equals/hashCode
    // contract (1MB and 1024KB were equal but hashed differently). BYTE is the smallest unit,
    // so get(Unit.BYTE) only multiplies, and numerically equal sizes yield the same double.
    return Double.valueOf(get(Unit.BYTE)).hashCode();
  }
}

View File

@ -0,0 +1,50 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.mode;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.RecordFilter;
/**
* Information about drilling down.
*
* When drilling down, going to next {@link Mode} with initial {@link RecordFilter}s.
*/
@InterfaceAudience.Private
public class DrillDownInfo {

  private final Mode nextMode;
  private final List<RecordFilter> initialFilters;

  /**
   * @param nextMode the mode entered when drilling down; must not be null
   * @param initialFilters the filters pre-applied in the next mode; defensively copied
   */
  public DrillDownInfo(Mode nextMode, List<RecordFilter> initialFilters) {
    this.nextMode = Objects.requireNonNull(nextMode);
    // Copy first so later mutation of the caller's list cannot leak through the
    // unmodifiable view.
    List<RecordFilter> copy = new ArrayList<>(initialFilters);
    this.initialFilters = Collections.unmodifiableList(copy);
  }

  /** @return the mode entered by this drill down */
  public Mode getNextMode() {
    return nextMode;
  }

  /** @return an unmodifiable list of the filters applied on entry */
  public List<RecordFilter> getInitialFilters() {
    return initialFilters;
  }
}

View File

@ -0,0 +1,74 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.mode;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.util.List;
import java.util.Objects;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.Record;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.hbtop.field.FieldInfo;
/**
* Represents a display mode in the top screen.
*/
@InterfaceAudience.Private
public enum Mode {
  NAMESPACE("Namespace", "Record per Namespace", new NamespaceModeStrategy()),
  TABLE("Table", "Record per Table", new TableModeStrategy()),
  REGION("Region", "Record per Region", new RegionModeStrategy()),
  REGION_SERVER("RegionServer", "Record per RegionServer", new RegionServerModeStrategy());

  // Short name shown in the screen header.
  private final String header;
  // Human-readable description of what one record represents in this mode.
  private final String description;
  // Strategy implementing record retrieval and drill-down for this mode.
  private final ModeStrategy modeStrategy;

  Mode(String header, String description, ModeStrategy modeStrategy) {
    this.header = Objects.requireNonNull(header);
    this.description = Objects.requireNonNull(description);
    this.modeStrategy = Objects.requireNonNull(modeStrategy);
  }

  public String getHeader() {
    return header;
  }

  public String getDescription() {
    return description;
  }

  /** Builds the records for this mode from the given cluster status (delegates to the strategy). */
  public List<Record> getRecords(ClusterStatus clusterStatus) {
    return modeStrategy.getRecords(clusterStatus);
  }

  /** @return the fields displayed in this mode, with their default widths/visibility */
  public List<FieldInfo> getFieldInfos() {
    return modeStrategy.getFieldInfos();
  }

  /** @return the field records are sorted by when this mode is first entered */
  public Field getDefaultSortField() {
    return modeStrategy.getDefaultSortField();
  }

  /**
   * Determines the drill-down destination for the given record.
   *
   * @return drill-down info, or null when this mode does not support drilling down from the record
   */
  @Nullable
  public DrillDownInfo drillDown(Record currentRecord) {
    return modeStrategy.drillDown(currentRecord);
  }
}

View File

@ -0,0 +1,38 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.mode;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.util.List;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.Record;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.hbtop.field.FieldInfo;
/**
* An interface for strategy logic for {@link Mode}.
*/
@InterfaceAudience.Private
interface ModeStrategy {

  /** @return the fields this mode displays, with their default widths/visibility */
  List<FieldInfo> getFieldInfos();

  /** @return the field records are sorted by when the mode is first entered */
  Field getDefaultSortField();

  /** Builds one record per displayed entity from the given cluster status. */
  List<Record> getRecords(ClusterStatus clusterStatus);

  /** @return drill-down target with initial filters, or null when drill-down is unsupported */
  @Nullable DrillDownInfo drillDown(Record selectedRecord);
}

View File

@ -0,0 +1,105 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.mode;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.Record;
import org.apache.hadoop.hbase.hbtop.RecordFilter;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.hbtop.field.FieldInfo;
/**
* Implementation for {@link ModeStrategy} for Namespace Mode.
*/
@InterfaceAudience.Private
public final class NamespaceModeStrategy implements ModeStrategy {

  /** Fields shown in Namespace mode and their default display widths. */
  private final List<FieldInfo> fieldInfos = Arrays.asList(
    new FieldInfo(Field.NAMESPACE, 0, true),
    new FieldInfo(Field.REGION_COUNT, 7, true),
    new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true),
    new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true),
    new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true),
    new FieldInfo(Field.STORE_FILE_SIZE, 13, true),
    new FieldInfo(Field.UNCOMPRESSED_STORE_FILE_SIZE, 15, false),
    new FieldInfo(Field.NUM_STORE_FILES, 7, true),
    new FieldInfo(Field.MEM_STORE_SIZE, 11, true)
  );

  // Namespace records are aggregations of per-region records.
  private final RegionModeStrategy regionModeStrategy = new RegionModeStrategy();

  NamespaceModeStrategy(){
  }

  @Override
  public List<FieldInfo> getFieldInfos() {
    return fieldInfos;
  }

  @Override
  public Field getDefaultSortField() {
    return Field.REQUEST_COUNT_PER_SECOND;
  }

  /**
   * Builds one record per namespace by projecting each per-region record onto this mode's
   * fields (plus REGION_COUNT = 1) and combining records that share the NAMESPACE field.
   */
  @Override
  public List<Record> getRecords(ClusterStatus clusterStatus) {
    Map<String, Record> aggregated = new HashMap<>();
    for (Record regionRecord : regionModeStrategy.getRecords(clusterStatus)) {
      // Keep only the fields this mode displays.
      List<Record.Entry> projected = new ArrayList<>();
      for (FieldInfo fieldInfo : fieldInfos) {
        if (regionRecord.containsKey(fieldInfo.getField())) {
          projected.add(Record.entry(fieldInfo.getField(),
            regionRecord.get(fieldInfo.getField())));
        }
      }
      // Each region contributes one to its namespace's REGION_COUNT.
      Record record = Record.builder().putAll(Record.ofEntries(projected))
        .put(Field.REGION_COUNT, 1).build();

      String namespace = record.get(Field.NAMESPACE).asString();
      Record existing = aggregated.get(namespace);
      aggregated.put(namespace, existing == null ? record : existing.combine(record));
    }
    return new ArrayList<>(aggregated.values());
  }

  /** Drilling down from a namespace goes to Table mode, filtered to that namespace. */
  @Override
  public DrillDownInfo drillDown(Record selectedRecord) {
    RecordFilter namespaceFilter = RecordFilter.newBuilder(Field.NAMESPACE)
      .doubleEquals(selectedRecord.get(Field.NAMESPACE));
    return new DrillDownInfo(Mode.TABLE, Collections.singletonList(namespaceFilter));
  }
}

View File

@ -0,0 +1,182 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.mode;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang3.time.FastDateFormat;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.Record;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.hbtop.field.FieldInfo;
import org.apache.hadoop.hbase.hbtop.field.Size;
import org.apache.hadoop.hbase.hbtop.field.Size.Unit;
import org.apache.hadoop.hbase.util.Bytes;
/**
* Implementation for {@link ModeStrategy} for Region Mode.
*/
@InterfaceAudience.Private
public final class RegionModeStrategy implements ModeStrategy {

  /** Fields shown in Region mode and their default display widths. */
  private final List<FieldInfo> fieldInfos = Arrays.asList(
    new FieldInfo(Field.REGION_NAME, 0, false),
    new FieldInfo(Field.NAMESPACE, 0, true),
    new FieldInfo(Field.TABLE, 0, true),
    new FieldInfo(Field.START_CODE, 13, false),
    new FieldInfo(Field.REPLICA_ID, 5, false),
    new FieldInfo(Field.REGION, 32, true),
    new FieldInfo(Field.REGION_SERVER, 0, true),
    new FieldInfo(Field.LONG_REGION_SERVER, 0, false),
    new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 8, true),
    new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 8, true),
    new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 8, true),
    new FieldInfo(Field.STORE_FILE_SIZE, 10, true),
    new FieldInfo(Field.UNCOMPRESSED_STORE_FILE_SIZE, 12, false),
    new FieldInfo(Field.NUM_STORE_FILES, 4, true),
    new FieldInfo(Field.MEM_STORE_SIZE, 8, true),
    new FieldInfo(Field.LOCALITY, 8, true),
    new FieldInfo(Field.START_KEY, 0, false),
    new FieldInfo(Field.COMPACTING_CELL_COUNT, 12, false),
    new FieldInfo(Field.COMPACTED_CELL_COUNT, 12, false),
    new FieldInfo(Field.COMPACTION_PROGRESS, 7, false),
    new FieldInfo(Field.LAST_MAJOR_COMPACTION_TIME, 19, false)
  );

  // Request counters in RegionLoad are cumulative; keep the previous sample per region
  // (keyed by full region name) so rates can be computed across refreshes.
  private final Map<String, RequestCountPerSecond> requestCountPerSecondMap = new HashMap<>();

  RegionModeStrategy() {
  }

  @Override
  public List<FieldInfo> getFieldInfos() {
    return fieldInfos;
  }

  @Override
  public Field getDefaultSortField() {
    return Field.REQUEST_COUNT_PER_SECOND;
  }

  /** Builds one record per region across all region servers in the cluster. */
  @Override
  public List<Record> getRecords(ClusterStatus clusterStatus) {
    List<Record> ret = new ArrayList<>();
    for (ServerName sn: clusterStatus.getServers()) {
      ServerLoad sl = clusterStatus.getLoad(sn);
      // The report end time is the sampling timestamp for the per-second rate calculation.
      long lastReportTimestamp = sl.obtainServerLoadPB().getReportEndTime();
      for (RegionLoad rl: sl.getRegionsLoad().values()) {
        ret.add(createRecord(sn, rl, lastReportTimestamp));
      }
    }
    return ret;
  }

  /**
   * Builds the record for a single region from its load metrics.
   *
   * @param sn the server currently hosting the region
   * @param regionLoad the region's load metrics
   * @param lastReportTimestamp the timestamp (ms) of the server's load report
   */
  private Record createRecord(ServerName sn, RegionLoad regionLoad, long lastReportTimestamp) {
    Record.Builder builder = Record.builder();

    String regionName = regionLoad.getNameAsString();
    builder.put(Field.REGION_NAME, regionName);

    // Decompose the region name; on parse failure the fields are left as empty strings so a
    // malformed name never breaks record creation.
    String namespaceName = "";
    String tableName = "";
    String region = "";
    String startKey = "";
    String startCode = "";
    String replicaId = "";
    try {
      byte[][] elements = HRegionInfo.parseRegionName(regionLoad.getName());
      TableName tn = TableName.valueOf(elements[0]);
      namespaceName = tn.getNamespaceAsString();
      tableName = tn.getQualifierAsString();
      startKey = Bytes.toStringBinary(elements[1]);
      startCode = Bytes.toString(elements[2]);
      // The replica id element is only present for non-default replicas.
      replicaId = elements.length == 4 ?
        Integer.valueOf(Bytes.toString(elements[3])).toString() : "";
      region = HRegionInfo.encodeRegionName(regionLoad.getName());
    } catch (IOException ignored) {
    }

    builder.put(Field.NAMESPACE, namespaceName);
    builder.put(Field.TABLE, tableName);
    builder.put(Field.START_CODE, startCode);
    builder.put(Field.REPLICA_ID, replicaId);
    builder.put(Field.REGION, region);
    builder.put(Field.START_KEY, startKey);
    builder.put(Field.REGION_SERVER, sn.toShortString());
    builder.put(Field.LONG_REGION_SERVER, sn.getServerName());

    // Turn the cumulative request counters into per-second rates using the previous sample.
    RequestCountPerSecond requestCountPerSecond = requestCountPerSecondMap.get(regionName);
    if (requestCountPerSecond == null) {
      requestCountPerSecond = new RequestCountPerSecond();
      requestCountPerSecondMap.put(regionName, requestCountPerSecond);
    }
    requestCountPerSecond.refresh(lastReportTimestamp, regionLoad.getReadRequestsCount(),
      regionLoad.getWriteRequestsCount());

    builder.put(Field.READ_REQUEST_COUNT_PER_SECOND,
      requestCountPerSecond.getReadRequestCountPerSecond());
    builder.put(Field.WRITE_REQUEST_COUNT_PER_SECOND,
      requestCountPerSecond.getWriteRequestCountPerSecond());
    builder.put(Field.REQUEST_COUNT_PER_SECOND,
      requestCountPerSecond.getRequestCountPerSecond());

    builder.put(Field.STORE_FILE_SIZE, new Size(regionLoad.getStorefileSizeMB(), Unit.MEGABYTE));
    builder.put(Field.UNCOMPRESSED_STORE_FILE_SIZE,
      new Size(regionLoad.getStoreUncompressedSizeMB(), Unit.MEGABYTE));
    builder.put(Field.NUM_STORE_FILES, regionLoad.getStorefiles());
    builder.put(Field.MEM_STORE_SIZE, new Size(regionLoad.getMemStoreSizeMB(), Unit.MEGABYTE));
    builder.put(Field.LOCALITY, regionLoad.getDataLocality());

    long compactingCellCount = regionLoad.getTotalCompactingKVs();
    long compactedCellCount = regionLoad.getCurrentCompactedKVs();
    float compactionProgress = 0;
    // Guard on the divisor: compactingCellCount can be 0 while compactedCellCount is non-zero
    // in a stale/partial report, and the previous check (compactedCellCount > 0) then produced
    // a float Infinity progress value.
    if (compactingCellCount > 0) {
      compactionProgress = 100 * ((float) compactedCellCount / compactingCellCount);
    }
    builder.put(Field.COMPACTING_CELL_COUNT, compactingCellCount);
    builder.put(Field.COMPACTED_CELL_COUNT, compactedCellCount);
    builder.put(Field.COMPACTION_PROGRESS, compactionProgress);

    FastDateFormat df = FastDateFormat.getInstance("yyyy-MM-dd HH:mm:ss");
    long lastMajorCompactionTimestamp = regionLoad.getLastMajorCompactionTs();
    // A zero timestamp means no major compaction has been recorded; render as empty.
    builder.put(Field.LAST_MAJOR_COMPACTION_TIME,
      lastMajorCompactionTimestamp == 0 ? "" : df.format(lastMajorCompactionTimestamp));

    return builder.build();
  }

  /** Region is the most fine-grained mode; there is nowhere further to drill down. */
  @Nullable
  @Override
  public DrillDownInfo drillDown(Record selectedRecord) {
    // do nothing
    return null;
  }
}

View File

@ -0,0 +1,124 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.mode;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.Record;
import org.apache.hadoop.hbase.hbtop.RecordFilter;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.hbtop.field.FieldInfo;
import org.apache.hadoop.hbase.hbtop.field.Size;
import org.apache.hadoop.hbase.hbtop.field.Size.Unit;
/**
* Implementation for {@link ModeStrategy} for RegionServer Mode.
*/
@InterfaceAudience.Private
public final class RegionServerModeStrategy implements ModeStrategy {

  /** Fields shown in RegionServer mode and their default display widths. */
  private final List<FieldInfo> fieldInfos = Arrays.asList(
    new FieldInfo(Field.REGION_SERVER, 0, true),
    new FieldInfo(Field.LONG_REGION_SERVER, 0, false),
    new FieldInfo(Field.REGION_COUNT, 7, true),
    new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true),
    new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true),
    new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true),
    new FieldInfo(Field.STORE_FILE_SIZE, 13, true),
    new FieldInfo(Field.UNCOMPRESSED_STORE_FILE_SIZE, 15, false),
    new FieldInfo(Field.NUM_STORE_FILES, 7, true),
    new FieldInfo(Field.MEM_STORE_SIZE, 11, true),
    new FieldInfo(Field.USED_HEAP_SIZE, 11, true),
    new FieldInfo(Field.MAX_HEAP_SIZE, 11, true)
  );

  // RegionServer records are aggregations of per-region records.
  private final RegionModeStrategy regionModeStrategy = new RegionModeStrategy();

  RegionServerModeStrategy(){
  }

  @Override
  public List<FieldInfo> getFieldInfos() {
    return fieldInfos;
  }

  @Override
  public Field getDefaultSortField() {
    return Field.REQUEST_COUNT_PER_SECOND;
  }

  /**
   * Builds one record per region server by projecting each per-region record onto this mode's
   * fields, combining records that share a server, then attaching the server's heap metrics.
   */
  @Override
  public List<Record> getRecords(ClusterStatus clusterStatus) {
    // Get records from RegionModeStrategy and add REGION_COUNT field
    List<Record> records = new ArrayList<>();
    for (Record record : regionModeStrategy.getRecords(clusterStatus)) {
      // Keep only the fields this mode displays.
      List<Record.Entry> entries = new ArrayList<>();
      for (FieldInfo fieldInfo : fieldInfos) {
        if (record.containsKey(fieldInfo.getField())) {
          entries.add(Record.entry(fieldInfo.getField(),
            record.get(fieldInfo.getField())));
        }
      }
      // Add REGION_COUNT field: each region contributes one to its server's count.
      records.add(Record.builder().putAll(Record.ofEntries(entries))
        .put(Field.REGION_COUNT, 1).build());
    }

    // Aggregate per region server, keyed by the server's full name (LONG_REGION_SERVER field).
    Map<String, Record> retMap = new HashMap<>();
    for (Record record : records) {
      String regionServer = record.get(Field.LONG_REGION_SERVER).asString();
      if (retMap.containsKey(regionServer)) {
        retMap.put(regionServer, retMap.get(regionServer).combine(record));
      } else {
        retMap.put(regionServer, record);
      }
    }

    // Add USED_HEAP_SIZE field and MAX_HEAP_SIZE field from the server-level load. Servers
    // hosting no regions have no aggregated record and are skipped.
    for (ServerName sn : clusterStatus.getServers()) {
      Record record = retMap.get(sn.getServerName());
      if (record == null) {
        continue;
      }
      ServerLoad sl = clusterStatus.getLoad(sn);
      Record newRecord = Record.builder().putAll(record)
        .put(Field.USED_HEAP_SIZE, new Size(sl.getUsedHeapMB(), Unit.MEGABYTE))
        .put(Field.MAX_HEAP_SIZE, new Size(sl.getMaxHeapMB(), Unit.MEGABYTE)).build();
      retMap.put(sn.getServerName(), newRecord);
    }
    return new ArrayList<>(retMap.values());
  }

  /** Drilling down from a region server goes to Region mode, filtered to that server. */
  @Override
  public DrillDownInfo drillDown(Record selectedRecord) {
    List<RecordFilter> initialFilters = Collections.singletonList(RecordFilter
      .newBuilder(Field.REGION_SERVER)
      .doubleEquals(selectedRecord.get(Field.REGION_SERVER)));
    return new DrillDownInfo(Mode.REGION, initialFilters);
  }
}

View File

@ -0,0 +1,63 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.mode;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
* Utility class for calculating request counts per second.
*/
@InterfaceAudience.Private
public class RequestCountPerSecond {

  // Snapshot of the previous report, used to turn cumulative counters into rates.
  private long previousLastReportTimestamp;
  private long previousReadRequestCount;
  private long previousWriteRequestCount;

  // Most recently computed rates; may go negative internally if the counters ever move
  // backwards, but the getters clamp to zero.
  private long readRequestCountPerSecond;
  private long writeRequestCountPerSecond;

  /**
   * Feeds a new sample of the cumulative read/write request counters.
   *
   * @param lastReportTimestamp report time in milliseconds
   * @param readRequestCount cumulative read request count at that time
   * @param writeRequestCount cumulative write request count at that time
   */
  public void refresh(long lastReportTimestamp, long readRequestCount, long writeRequestCount) {
    if (previousLastReportTimestamp == 0) {
      // First sample ever: nothing to diff against yet, just remember it.
      previousLastReportTimestamp = lastReportTimestamp;
      previousReadRequestCount = readRequestCount;
      previousWriteRequestCount = writeRequestCount;
      return;
    }
    if (previousLastReportTimestamp == lastReportTimestamp) {
      // Same report as last time; rates stay unchanged.
      return;
    }
    // Clamp the interval to at least one second so we never divide by zero.
    long elapsedSeconds = Math.max(1, (lastReportTimestamp - previousLastReportTimestamp) / 1000);
    readRequestCountPerSecond = (readRequestCount - previousReadRequestCount) / elapsedSeconds;
    writeRequestCountPerSecond = (writeRequestCount - previousWriteRequestCount) / elapsedSeconds;
    previousLastReportTimestamp = lastReportTimestamp;
    previousReadRequestCount = readRequestCount;
    previousWriteRequestCount = writeRequestCount;
  }

  /** @return read requests per second, never negative */
  public long getReadRequestCountPerSecond() {
    return Math.max(0, readRequestCountPerSecond);
  }

  /** @return write requests per second, never negative */
  public long getWriteRequestCountPerSecond() {
    return Math.max(0, writeRequestCountPerSecond);
  }

  /** @return total (read + write) requests per second */
  public long getRequestCountPerSecond() {
    return getReadRequestCountPerSecond() + getWriteRequestCountPerSecond();
  }
}

View File

@ -0,0 +1,108 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.mode;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.Record;
import org.apache.hadoop.hbase.hbtop.RecordFilter;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.hbtop.field.FieldInfo;
/**
 * Implementation for {@link ModeStrategy} for Table Mode.
 *
 * <p>Builds one record per table by taking the per-region records from
 * {@link RegionModeStrategy} and aggregating them by table name.
 */
@InterfaceAudience.Private
public final class TableModeStrategy implements ModeStrategy {
// Columns shown in Table mode: (field, header width, displayed-by-default).
private final List<FieldInfo> fieldInfos = Arrays.asList(
new FieldInfo(Field.NAMESPACE, 0, true),
new FieldInfo(Field.TABLE, 0, true),
new FieldInfo(Field.REGION_COUNT, 7, true),
new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true),
new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true),
new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true),
new FieldInfo(Field.STORE_FILE_SIZE, 13, true),
new FieldInfo(Field.UNCOMPRESSED_STORE_FILE_SIZE, 15, false),
new FieldInfo(Field.NUM_STORE_FILES, 7, true),
new FieldInfo(Field.MEM_STORE_SIZE, 11, true)
);
// Table mode is derived from the per-region records this strategy produces.
private final RegionModeStrategy regionModeStrategy = new RegionModeStrategy();
// Package-private: presumably instantiated only by the Mode enum — confirm.
TableModeStrategy() {
}
@Override
public List<FieldInfo> getFieldInfos() {
return fieldInfos;
}
@Override
public Field getDefaultSortField() {
return Field.REQUEST_COUNT_PER_SECOND;
}
/**
 * Build one aggregated record per table from the per-region records.
 */
@Override
public List<Record> getRecords(ClusterStatus clusterStatus) {
// Get records from RegionModeStrategy and add REGION_COUNT field
List<Record> records = new ArrayList<>();
for (Record record : regionModeStrategy.getRecords(clusterStatus)) {
// Keep only the fields that Table mode displays.
List<Record.Entry> entries = new ArrayList<>();
for (FieldInfo fieldInfo : fieldInfos) {
if (record.containsKey(fieldInfo.getField())) {
entries.add(Record.entry(fieldInfo.getField(),
record.get(fieldInfo.getField())));
}
}
// Each region contributes REGION_COUNT = 1; per-table totals emerge when
// records for the same table are combined below.
records.add(Record.builder().putAll(Record.ofEntries(entries))
.put(Field.REGION_COUNT, 1).build());
}
// Aggregate by table identity (NAMESPACE + TABLE), merging per-region records
// with Record.combine (presumably sums the numeric fields — confirm in Record).
Map<TableName, Record> retMap = new HashMap<>();
for (Record record : records) {
String namespace = record.get(Field.NAMESPACE).asString();
String table = record.get(Field.TABLE).asString();
TableName tableName = TableName.valueOf(namespace, table);
if (retMap.containsKey(tableName)) {
retMap.put(tableName, retMap.get(tableName).combine(record));
} else {
retMap.put(tableName, record);
}
}
return new ArrayList<>(retMap.values());
}
// Drill down from a table row into REGION mode, filtered to that table's
// regions via equality filters on both NAMESPACE and TABLE.
@Override
public DrillDownInfo drillDown(Record selectedRecord) {
List<RecordFilter> initialFilters = Arrays.asList(
RecordFilter.newBuilder(Field.NAMESPACE).doubleEquals(selectedRecord.get(Field.NAMESPACE)),
RecordFilter.newBuilder(Field.TABLE).doubleEquals(selectedRecord.get(Field.TABLE)));
return new DrillDownInfo(Mode.REGION, initialFilters);
}
}

View File

@ -0,0 +1,102 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.util.Objects;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.terminal.KeyPress;
import org.apache.hadoop.hbase.hbtop.terminal.Terminal;
import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter;
import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize;
/**
 * An abstract class for {@link ScreenView} that has the common useful methods and the default
 * implementations for the abstract methods.
 *
 * <p>The defaults for {@link #handleKeyPress(KeyPress)} and {@link #handleTimer()} return
 * {@code this}, i.e. stay on the current screen; subclasses override what they need.
 */
@InterfaceAudience.Private
public abstract class AbstractScreenView implements ScreenView {
// Owning Screen (event loop / timer) and the terminal this view paints to.
private final Screen screen;
private final Terminal terminal;
public AbstractScreenView(Screen screen, Terminal terminal) {
this.screen = Objects.requireNonNull(screen);
this.terminal = Objects.requireNonNull(terminal);
}
// No-op by default; subclasses paint their initial state here.
@Override
public void init() {
}
// Default: ignore the key press and remain on this screen.
@Override
public ScreenView handleKeyPress(KeyPress keyPress) {
return this;
}
// Default: ignore the timer and remain on this screen.
@Override
public ScreenView handleTimer() {
return this;
}
protected Screen getScreen() {
return screen;
}
protected Terminal getTerminal() {
return terminal;
}
// Schedule a timer on the owning Screen; it fires back into handleTimer().
protected void setTimer(long delay) {
screen.setTimer(delay);
}
protected void cancelTimer() {
screen.cancelTimer();
}
// Printer that starts emitting at the given terminal row.
protected TerminalPrinter getTerminalPrinter(int startRow) {
return terminal.getTerminalPrinter(startRow);
}
protected TerminalSize getTerminalSize() {
return terminal.getSize();
}
// Presumably returns the new size when the terminal was resized, else null
// (inferred from @Nullable and the name — confirm in Terminal).
@Nullable
protected TerminalSize doResizeIfNecessary() {
return terminal.doResizeIfNecessary();
}
public void clearTerminal() {
terminal.clear();
}
public void refreshTerminal() {
terminal.refresh();
}
public void hideCursor() {
terminal.hideCursor();
}
public void setCursorPosition(int column, int row) {
terminal.setCursorPosition(column, row);
}
}

View File

@ -0,0 +1,132 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen;
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.hbtop.mode.Mode;
import org.apache.hadoop.hbase.hbtop.screen.top.TopScreenView;
import org.apache.hadoop.hbase.hbtop.terminal.KeyPress;
import org.apache.hadoop.hbase.hbtop.terminal.Terminal;
import org.apache.hadoop.hbase.hbtop.terminal.impl.TerminalImpl;
/**
 * This dispatches key presses and timers to the current {@link ScreenView}.
 *
 * <p>Owns the HBase connection/admin and the terminal; {@link #run()} is the
 * blocking event loop, which exits when a screen transition returns null.
 */
@InterfaceAudience.Private
public class Screen implements Closeable {
  private static final Log LOG = LogFactory.getLog(Screen.class);

  /** How long to sleep while idle-polling for input or a pending timer (milliseconds). */
  private static final long SLEEP_TIMEOUT_MILLISECONDS = 100;

  private final Connection connection;
  private final Admin admin;
  private final Terminal terminal;

  /** The screen currently receiving key presses and timer events. */
  private ScreenView currentScreenView;

  /** Absolute epoch-millis deadline of the pending timer; null when no timer is set. */
  private Long timerTimestamp;

  public Screen(Configuration conf, long initialRefreshDelay, Mode initialMode)
      throws IOException {
    connection = ConnectionFactory.createConnection(conf);
    admin = connection.getAdmin();

    // The first screen is the top screen
    this.terminal = new TerminalImpl("hbtop");
    currentScreenView = new TopScreenView(this, terminal, initialRefreshDelay, admin,
      initialMode);
  }

  @Override
  public void close() throws IOException {
    // Close in reverse order of acquisition; the nested finally blocks make sure
    // every resource is closed even when an earlier close() throws.
    try {
      admin.close();
    } finally {
      try {
        connection.close();
      } finally {
        terminal.close();
      }
    }
  }

  /**
   * Main event loop: polls the terminal for key presses and dispatches them (or an
   * expired timer) to the current screen until a transition returns {@code null}.
   */
  public void run() {
    currentScreenView.init();
    while (true) {
      try {
        KeyPress keyPress = terminal.pollKeyPress();

        ScreenView nextScreenView;
        if (keyPress != null) {
          // Dispatch the key press to the current screen
          nextScreenView = currentScreenView.handleKeyPress(keyPress);
        } else {
          if (timerTimestamp != null) {
            long now = System.currentTimeMillis();
            if (timerTimestamp <= now) {
              // Dispatch the timer to the current screen
              timerTimestamp = null;
              nextScreenView = currentScreenView.handleTimer();
            } else {
              // Sleep until the timer fires, but never longer than the poll timeout
              // so new key presses are still noticed promptly.
              if (timerTimestamp - now < SLEEP_TIMEOUT_MILLISECONDS) {
                TimeUnit.MILLISECONDS.sleep(timerTimestamp - now);
              } else {
                TimeUnit.MILLISECONDS.sleep(SLEEP_TIMEOUT_MILLISECONDS);
              }
              continue;
            }
          } else {
            TimeUnit.MILLISECONDS.sleep(SLEEP_TIMEOUT_MILLISECONDS);
            continue;
          }
        }

        // If the next screen is null, then exit
        if (nextScreenView == null) {
          return;
        }

        // If the next screen is not the previous, then go to the next screen
        if (nextScreenView != currentScreenView) {
          currentScreenView = nextScreenView;
          currentScreenView.init();
        }
      } catch (InterruptedException e) {
        // BUGFIX: the generic catch below used to swallow InterruptedException
        // from the sleeps, losing the thread's interrupt status and making the
        // loop un-interruptible. Restore the flag and exit the loop.
        Thread.currentThread().interrupt();
        LOG.warn("Interrupted; exiting the event loop", e);
        return;
      } catch (Exception e) {
        // Keep the UI alive on unexpected errors from a screen handler.
        LOG.error("Caught an exception", e);
      }
    }
  }

  /** Arm the timer to fire {@code delay} milliseconds from now. */
  public void setTimer(long delay) {
    timerTimestamp = System.currentTimeMillis() + delay;
  }

  /** Disarm any pending timer. */
  public void cancelTimer() {
    timerTimestamp = null;
  }
}

View File

@ -0,0 +1,33 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen;
import edu.umd.cs.findbugs.annotations.Nullable;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.terminal.KeyPress;
/**
 * An interface for a screen view that handles key presses and timers.
 *
 * <p>The {@code Screen} event loop interprets the return values: returning the
 * same instance keeps the current screen, a different instance transitions to
 * it (and {@code init()} is called on it), and {@code null} exits the loop.
 */
@InterfaceAudience.Private
public interface ScreenView {
/** Called when this view becomes the current screen, to paint its initial state. */
void init();
/**
 * Handle a key press.
 * @return the next screen to show; {@code this} to stay, {@code null} to exit
 */
@Nullable ScreenView handleKeyPress(KeyPress keyPress);
/**
 * Handle an expired timer set via {@code Screen#setTimer}.
 * @return the next screen to show; {@code this} to stay, {@code null} to exit
 */
@Nullable ScreenView handleTimer();
}

View File

@ -0,0 +1,184 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.field;
import java.util.ArrayList;
import java.util.EnumMap;
import java.util.List;
import java.util.Objects;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.hbtop.screen.ScreenView;
/**
 * The presentation logic for the field screen.
 *
 * <p>Tracks the cursor position, a "move mode" flag for reordering fields, the
 * per-field display on/off state, and the current sort field; on leaving the
 * screen, the outcome is handed back through {@link ResultListener}.
 */
@InterfaceAudience.Private
public class FieldScreenPresenter {
/** Callback that receives the screen's outcome when the user leaves it. */
public interface ResultListener {
void accept(Field sortField, List<Field> fields, EnumMap<Field, Boolean> fieldDisplayMap);
}
private final FieldScreenView fieldScreenView;
// Field currently used as the sort key.
private Field sortField;
// Fields in display order; mutated when the user reorders them.
private final List<Field> fields;
// Whether each field is displayed (toggled by switchFieldDisplay).
private final EnumMap<Field, Boolean> fieldDisplayMap;
private final ResultListener resultListener;
// Screen to return to when the user quits this screen.
private final ScreenView nextScreenView;
// Widest header/description across all fields, for column alignment.
private final int headerMaxLength;
private final int descriptionMaxLength;
// Index of the row the cursor is on.
private int currentPosition;
// When true, up/down moves the selected field instead of the cursor.
private boolean moveMode;
public FieldScreenPresenter(FieldScreenView fieldScreenView, Field sortField, List<Field> fields,
EnumMap<Field, Boolean> fieldDisplayMap, ResultListener resultListener,
ScreenView nextScreenView) {
this.fieldScreenView = Objects.requireNonNull(fieldScreenView);
this.sortField = Objects.requireNonNull(sortField);
// Defensive copies: this screen mutates both collections.
this.fields = new ArrayList<>(Objects.requireNonNull(fields));
this.fieldDisplayMap = new EnumMap<>(Objects.requireNonNull(fieldDisplayMap));
this.resultListener = Objects.requireNonNull(resultListener);
this.nextScreenView = Objects.requireNonNull(nextScreenView);
int headerLength = 0;
int descriptionLength = 0;
// One pass: locate the sort field's row (initial cursor position) and find
// the maximum header/description widths for alignment.
for (int i = 0; i < fields.size(); i ++) {
Field field = fields.get(i);
if (field == sortField) {
currentPosition = i;
}
if (headerLength < field.getHeader().length()) {
headerLength = field.getHeader().length();
}
if (descriptionLength < field.getDescription().length()) {
descriptionLength = field.getDescription().length();
}
}
headerMaxLength = headerLength;
descriptionMaxLength = descriptionLength;
}
// Paint the full field screen.
public void init() {
fieldScreenView.hideCursor();
fieldScreenView.clearTerminal();
fieldScreenView.showFieldScreen(sortField.getHeader(), fields, fieldDisplayMap,
currentPosition, headerMaxLength, descriptionMaxLength, moveMode);
fieldScreenView.refreshTerminal();
}
// Cursor up one row. In move mode this swaps the selected field with the one
// above it: after the decrement, remove(currentPosition) takes that neighbor,
// and re-inserting it at currentPosition + 1 places it below the moved field.
// Only the two affected rows are repainted.
public void arrowUp() {
if (currentPosition > 0) {
currentPosition -= 1;
if (moveMode) {
Field tmp = fields.remove(currentPosition);
fields.add(currentPosition + 1, tmp);
}
showField(currentPosition);
showField(currentPosition + 1);
fieldScreenView.refreshTerminal();
}
}
// Cursor down one row; in move mode this swaps the selected field with the
// one below it (mirror image of arrowUp).
public void arrowDown() {
if (currentPosition < fields.size() - 1) {
currentPosition += 1;
if (moveMode) {
Field tmp = fields.remove(currentPosition - 1);
fields.add(currentPosition, tmp);
}
showField(currentPosition);
showField(currentPosition - 1);
fieldScreenView.refreshTerminal();
}
}
// Jump the cursor to the first row; disabled while in move mode.
public void pageUp() {
if (currentPosition > 0 && !moveMode) {
int previousPosition = currentPosition;
currentPosition = 0;
showField(previousPosition);
showField(currentPosition);
fieldScreenView.refreshTerminal();
}
}
// Jump the cursor to the last row; disabled while in move mode.
public void pageDown() {
if (currentPosition < fields.size() - 1 && !moveMode) {
int previousPosition = currentPosition;
currentPosition = fields.size() - 1;
showField(previousPosition);
showField(currentPosition);
fieldScreenView.refreshTerminal();
}
}
// Enter move mode; repaint the current row so it renders in the move style.
public void turnOnMoveMode() {
moveMode = true;
showField(currentPosition);
fieldScreenView.refreshTerminal();
}
public void turnOffMoveMode() {
moveMode = false;
showField(currentPosition);
fieldScreenView.refreshTerminal();
}
// Toggle whether the selected field is displayed; disabled while in move mode.
// NOTE(review): unboxing fieldDisplayMap.get(field) would NPE if a field were
// ever absent from the map — presumably callers always populate every field;
// confirm at the construction sites.
public void switchFieldDisplay() {
if (!moveMode) {
Field field = fields.get(currentPosition);
fieldDisplayMap.put(field, !fieldDisplayMap.get(field));
showField(currentPosition);
fieldScreenView.refreshTerminal();
}
}
// Repaint a single row.
private void showField(int pos) {
Field field = fields.get(pos);
fieldScreenView.showField(pos, field, fieldDisplayMap.get(field), pos == currentPosition,
headerMaxLength, descriptionMaxLength, moveMode);
}
// Make the selected field the sort key; disabled while in move mode. Only the
// screen description (which names the sort field) needs repainting.
public void setSortField() {
if (!moveMode) {
Field newSortField = fields.get(currentPosition);
if (newSortField != this.sortField) {
this.sortField = newSortField;
fieldScreenView.showScreenDescription(sortField.getHeader());
fieldScreenView.refreshTerminal();
}
}
}
// Hand the (possibly modified) sort field, field order and display map back
// to the caller, then leave this screen.
public ScreenView transitionToNextScreen() {
resultListener.accept(sortField, fields, fieldDisplayMap);
return nextScreenView;
}
}

View File

@ -0,0 +1,193 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.field;
import java.util.EnumMap;
import java.util.List;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.hbtop.screen.AbstractScreenView;
import org.apache.hadoop.hbase.hbtop.screen.Screen;
import org.apache.hadoop.hbase.hbtop.screen.ScreenView;
import org.apache.hadoop.hbase.hbtop.terminal.KeyPress;
import org.apache.hadoop.hbase.hbtop.terminal.Terminal;
import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter;
/**
 * The screen where we can change the displayed fields, the sort key and the order of the fields.
 *
 * <p>Pure view: all state lives in {@link FieldScreenPresenter}; this class
 * translates key presses into presenter calls and renders rows on the terminal.
 */
@InterfaceAudience.Private
public class FieldScreenView extends AbstractScreenView {
// Terminal rows where the description block and the field list start.
private static final int SCREEN_DESCRIPTION_START_ROW = 0;
private static final int FIELD_START_ROW = 5;
private final FieldScreenPresenter fieldScreenPresenter;
public FieldScreenView(Screen screen, Terminal terminal, Field sortField, List<Field> fields,
EnumMap<Field, Boolean> fieldDisplayMap, FieldScreenPresenter.ResultListener resultListener,
ScreenView nextScreenView) {
super(screen, terminal);
this.fieldScreenPresenter = new FieldScreenPresenter(this, sortField, fields, fieldDisplayMap,
resultListener, nextScreenView);
}
@Override
public void init() {
fieldScreenPresenter.init();
}
// Keyboard dispatch: special keys first, then single-character commands.
@Override
public ScreenView handleKeyPress(KeyPress keyPress) {
switch (keyPress.getType()) {
case Escape:
return fieldScreenPresenter.transitionToNextScreen();
case ArrowUp:
fieldScreenPresenter.arrowUp();
return this;
case ArrowDown:
fieldScreenPresenter.arrowDown();
return this;
case PageUp:
case Home:
fieldScreenPresenter.pageUp();
return this;
case PageDown:
case End:
fieldScreenPresenter.pageDown();
return this;
case ArrowRight:
fieldScreenPresenter.turnOnMoveMode();
return this;
case ArrowLeft:
case Enter:
fieldScreenPresenter.turnOffMoveMode();
return this;
default:
// Do nothing
break;
}
// Non-character keys not handled above are ignored.
if (keyPress.getType() != KeyPress.Type.Character) {
return this;
}
assert keyPress.getCharacter() != null;
switch (keyPress.getCharacter()) {
case 'd':
case ' ':
fieldScreenPresenter.switchFieldDisplay();
break;
case 's':
fieldScreenPresenter.setSortField();
break;
case 'q':
return fieldScreenPresenter.transitionToNextScreen();
default:
// Do nothing
break;
}
return this;
}
// Full repaint: screen description followed by one row per field.
public void showFieldScreen(String sortFieldHeader, List<Field> fields,
EnumMap<Field, Boolean> fieldDisplayMap, int currentPosition, int headerMaxLength,
int descriptionMaxLength, boolean moveMode) {
showScreenDescription(sortFieldHeader);
for (int i = 0; i < fields.size(); i ++) {
Field field = fields.get(i);
showField(i, field, fieldDisplayMap.get(field), i == currentPosition, headerMaxLength,
descriptionMaxLength, moveMode);
}
}
public void showScreenDescription(String sortKeyHeader) {
TerminalPrinter printer = getTerminalPrinter(SCREEN_DESCRIPTION_START_ROW);
printer.startBold().print("Fields Management").stopBold().endOfLine();
printer.print("Current Sort Field: ").startBold().print(sortKeyHeader).stopBold().endOfLine();
printer.print("Navigate with up/down, Right selects for move then <Enter> or Left commits,")
.endOfLine();
printer.print("'d' or <Space> toggles display, 's' sets sort. Use 'q' or <Esc> to end!")
.endOfLine();
}
// Render one field row. Conventions (from the branches below): a "*" prefix
// and bold text mark a displayed field; the selected row is highlighted — the
// whole "header = description" in move mode, only the header otherwise.
public void showField(int pos, Field field, boolean display, boolean selected,
int fieldHeaderMaxLength, int fieldDescriptionMaxLength, boolean moveMode) {
// Pad header and description to the column widths computed by the presenter.
String fieldHeader = String.format("%-" + fieldHeaderMaxLength + "s", field.getHeader());
String fieldDescription = String.format("%-" + fieldDescriptionMaxLength + "s",
field.getDescription());
int row = FIELD_START_ROW + pos;
TerminalPrinter printer = getTerminalPrinter(row);
if (selected) {
String prefix = display ? "* " : "  ";
if (moveMode) {
printer.print(prefix);
if (display) {
printer.startBold();
}
printer.startHighlight()
.printFormat("%s = %s", fieldHeader, fieldDescription).stopHighlight();
if (display) {
printer.stopBold();
}
printer.endOfLine();
} else {
printer.print(prefix);
if (display) {
printer.startBold();
}
printer.startHighlight().print(fieldHeader).stopHighlight()
.printFormat(" = %s", fieldDescription);
if (display) {
printer.stopBold();
}
printer.endOfLine();
}
} else {
if (display) {
printer.print("* ").startBold().printFormat("%s = %s", fieldHeader, fieldDescription)
.stopBold().endOfLine();
} else {
printer.printFormat("  %s = %s", fieldHeader, fieldDescription).endOfLine();
}
}
}
}

View File

@ -0,0 +1,52 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.help;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
 * Represents a description of a command that we can execute in the top screen.
 *
 * <p>Immutable: the key list is defensively copied and wrapped unmodifiable.
 */
@InterfaceAudience.Private
public class CommandDescription {

  /** The key(s) that trigger the command, in display order. */
  private final List<String> keys;

  /** Human-readable explanation shown on the help screen. */
  private final String description;

  public CommandDescription(String key, String description) {
    this(Collections.singletonList(Objects.requireNonNull(key)), description);
  }

  public CommandDescription(List<String> keys, String description) {
    // Defensive copy so later mutation of the caller's list cannot leak in.
    List<String> defensiveCopy = new ArrayList<>(Objects.requireNonNull(keys));
    this.keys = Collections.unmodifiableList(defensiveCopy);
    this.description = Objects.requireNonNull(description);
  }

  public List<String> getKeys() {
    return keys;
  }

  public String getDescription() {
    return description;
  }
}

View File

@ -0,0 +1,72 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.help;
import java.util.Arrays;
import java.util.Objects;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.screen.ScreenView;
/**
 * The presentation logic for the help screen.
 *
 * <p>The screen is static: it is painted once in {@link #init()} and dismissed
 * by returning {@link #transitionToNextScreen()} from the view.
 */
@InterfaceAudience.Private
public class HelpScreenPresenter {
// The command list rendered on the help screen, in display order.
private static final CommandDescription[] COMMAND_DESCRIPTIONS = new CommandDescription[] {
new CommandDescription("f", "Add/Remove/Order/Sort the fields"),
new CommandDescription("R", "Toggle the sort order (ascending/descending)"),
new CommandDescription("m", "Select mode"),
new CommandDescription("o", "Add a filter with ignoring case"),
new CommandDescription("O", "Add a filter with case sensitive"),
new CommandDescription("^o", "Show the current filters"),
new CommandDescription("=", "Clear the current filters"),
new CommandDescription("i", "Drill down"),
new CommandDescription(
Arrays.asList("up", "down", "left", "right", "pageUp", "pageDown", "home", "end"),
"Scroll the metrics"),
new CommandDescription("d", "Change the refresh delay"),
new CommandDescription("X", "Adjust the field length"),
new CommandDescription("<Enter>", "Refresh the display"),
new CommandDescription("h", "Display this screen"),
new CommandDescription(Arrays.asList("q", "<Esc>"), "Quit")
};
private final HelpScreenView helpScreenView;
// Current refresh delay in milliseconds, shown in the screen header
// (the view divides it by 1000 for display).
private final long refreshDelay;
// Screen to return to when the user dismisses the help screen.
private final ScreenView nextScreenView;
public HelpScreenPresenter(HelpScreenView helpScreenView, long refreshDelay,
ScreenView nextScreenView) {
this.helpScreenView = Objects.requireNonNull(helpScreenView);
this.refreshDelay = refreshDelay;
this.nextScreenView = Objects.requireNonNull(nextScreenView);
}
// Paint the whole help screen once.
public void init() {
helpScreenView.hideCursor();
helpScreenView.clearTerminal();
helpScreenView.showHelpScreen(refreshDelay, COMMAND_DESCRIPTIONS);
helpScreenView.refreshTerminal();
}
public ScreenView transitionToNextScreen() {
return nextScreenView;
}
}

View File

@ -0,0 +1,89 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.help;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.screen.AbstractScreenView;
import org.apache.hadoop.hbase.hbtop.screen.Screen;
import org.apache.hadoop.hbase.hbtop.screen.ScreenView;
import org.apache.hadoop.hbase.hbtop.terminal.KeyPress;
import org.apache.hadoop.hbase.hbtop.terminal.Terminal;
import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter;
/**
 * The help screen.
 *
 * <p>Any key press dismisses this screen and returns to the previous one.
 */
@InterfaceAudience.Private
public class HelpScreenView extends AbstractScreenView {
// Terminal rows where the description block and the command list start.
private static final int SCREEN_DESCRIPTION_START_ROW = 0;
private static final int COMMAND_DESCRIPTION_START_ROW = 3;
private final HelpScreenPresenter helpScreenPresenter;
public HelpScreenView(Screen screen, Terminal terminal, long refreshDelay,
ScreenView nextScreenView) {
super(screen, terminal);
this.helpScreenPresenter = new HelpScreenPresenter(this, refreshDelay, nextScreenView);
}
@Override
public void init() {
helpScreenPresenter.init();
}
// Any key dismisses the help screen.
@Override
public ScreenView handleKeyPress(KeyPress keyPress) {
return helpScreenPresenter.transitionToNextScreen();
}
// Full repaint: screen description, one row per command, then the footer.
public void showHelpScreen(long refreshDelay, CommandDescription[] commandDescriptions) {
showScreenDescription(refreshDelay);
TerminalPrinter printer = getTerminalPrinter(COMMAND_DESCRIPTION_START_ROW);
for (CommandDescription commandDescription : commandDescriptions) {
showCommandDescription(printer, commandDescription);
}
printer.endOfLine();
printer.print("Press any key to continue").endOfLine();
}
private void showScreenDescription(long refreshDelay) {
TerminalPrinter printer = getTerminalPrinter(SCREEN_DESCRIPTION_START_ROW);
printer.startBold().print("Help for Interactive Commands").stopBold().endOfLine();
// The delay is held in milliseconds; display it as fractional seconds.
printer.print("Refresh delay: ").startBold()
.print((double) refreshDelay / 1000).stopBold().endOfLine();
}
// Prints one row: indented comma-separated key(s) in bold, then the description.
private void showCommandDescription(TerminalPrinter terminalPrinter,
CommandDescription commandDescription) {
terminalPrinter.print("  ");
boolean first = true;
for (String key : commandDescription.getKeys()) {
// Comma-separate multiple keys bound to the same command.
if (first) {
first = false;
} else {
terminalPrinter.print(",");
}
terminalPrinter.startBold().print(key).stopBold();
}
terminalPrinter.printFormat(": %s", commandDescription.getDescription()).endOfLine();
}
}

View File

@ -0,0 +1,134 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.mode;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.mode.Mode;
import org.apache.hadoop.hbase.hbtop.screen.ScreenView;
/**
 * The presentation logic for the mode screen: tracks which mode row the cursor
 * is on and notifies the listener when the user commits a mode change.
 */
@InterfaceAudience.Private
public class ModeScreenPresenter {

  /** Callback invoked when the user selects a different mode. */
  public interface ResultListener {
    void accept(Mode mode);
  }

  private final ModeScreenView modeScreenView;
  private final Mode currentMode;
  private final ResultListener resultListener;
  private final ScreenView nextScreenView;
  // Column widths, precomputed so every row lines up vertically.
  private final int modeHeaderMaxLength;
  private final int modeDescriptionMaxLength;
  private final List<Mode> modes = Arrays.asList(Mode.values());
  // Index into modes of the row the cursor is currently on.
  private int currentPosition;

  public ModeScreenPresenter(ModeScreenView modeScreenView, Mode currentMode,
    ResultListener resultListener, ScreenView nextScreenView) {
    this.modeScreenView = Objects.requireNonNull(modeScreenView);
    this.currentMode = Objects.requireNonNull(currentMode);
    this.resultListener = Objects.requireNonNull(resultListener);
    this.nextScreenView = Objects.requireNonNull(nextScreenView);

    // Start the cursor on the active mode (defensively fall back to 0).
    int activeIndex = modes.indexOf(currentMode);
    currentPosition = activeIndex >= 0 ? activeIndex : 0;

    int headerWidth = 0;
    int descriptionWidth = 0;
    for (Mode mode : modes) {
      headerWidth = Math.max(headerWidth, mode.getHeader().length());
      descriptionWidth = Math.max(descriptionWidth, mode.getDescription().length());
    }
    modeHeaderMaxLength = headerWidth;
    modeDescriptionMaxLength = descriptionWidth;
  }

  /** Clears the terminal and paints the full mode screen. */
  public void init() {
    modeScreenView.hideCursor();
    modeScreenView.clearTerminal();
    modeScreenView.showModeScreen(currentMode, modes, currentPosition, modeHeaderMaxLength,
      modeDescriptionMaxLength);
    modeScreenView.refreshTerminal();
  }

  /** Moves the cursor one row up, repainting the two affected rows. */
  public void arrowUp() {
    if (currentPosition <= 0) {
      return;
    }
    currentPosition--;
    showMode(currentPosition);
    showMode(currentPosition + 1);
    modeScreenView.refreshTerminal();
  }

  /** Moves the cursor one row down, repainting the two affected rows. */
  public void arrowDown() {
    if (currentPosition >= modes.size() - 1) {
      return;
    }
    currentPosition++;
    showMode(currentPosition);
    showMode(currentPosition - 1);
    modeScreenView.refreshTerminal();
  }

  /** Jumps the cursor to the first row. */
  public void pageUp() {
    if (currentPosition <= 0) {
      return;
    }
    int oldPosition = currentPosition;
    currentPosition = 0;
    showMode(oldPosition);
    showMode(currentPosition);
    modeScreenView.refreshTerminal();
  }

  /** Jumps the cursor to the last row. */
  public void pageDown() {
    if (currentPosition >= modes.size() - 1) {
      return;
    }
    int oldPosition = currentPosition;
    currentPosition = modes.size() - 1;
    showMode(oldPosition);
    showMode(currentPosition);
    modeScreenView.refreshTerminal();
  }

  // Repaint a single row; it is highlighted when the cursor is on it.
  private void showMode(int pos) {
    modeScreenView.showMode(pos, modes.get(pos), pos == currentPosition, modeHeaderMaxLength,
      modeDescriptionMaxLength);
  }

  /**
   * Leaves the mode screen.
   *
   * @param changeMode whether the highlighted mode should become the active one
   * @return the screen to return to
   */
  public ScreenView transitionToNextScreen(boolean changeMode) {
    Mode selectedMode = modes.get(currentPosition);
    if (changeMode && selectedMode != currentMode) {
      resultListener.accept(selectedMode);
    }
    return nextScreenView;
  }
}

View File

@ -0,0 +1,136 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.mode;
import java.util.List;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.mode.Mode;
import org.apache.hadoop.hbase.hbtop.screen.AbstractScreenView;
import org.apache.hadoop.hbase.hbtop.screen.Screen;
import org.apache.hadoop.hbase.hbtop.screen.ScreenView;
import org.apache.hadoop.hbase.hbtop.terminal.KeyPress;
import org.apache.hadoop.hbase.hbtop.terminal.Terminal;
import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter;
/**
 * The screen where we can choose the {@link Mode} in the top screen.
 */
@InterfaceAudience.Private
public class ModeScreenView extends AbstractScreenView {

  private static final int SCREEN_DESCRIPTION_START_ROW = 0;
  private static final int MODE_START_ROW = 4;

  private final ModeScreenPresenter modeScreenPresenter;

  public ModeScreenView(Screen screen, Terminal terminal, Mode currentMode,
    ModeScreenPresenter.ResultListener resultListener, ScreenView nextScreenView) {
    super(screen, terminal);
    this.modeScreenPresenter = new ModeScreenPresenter(this, currentMode, resultListener,
      nextScreenView);
  }

  @Override
  public void init() {
    modeScreenPresenter.init();
  }

  /**
   * Keyboard dispatch: Escape/'q' cancels, Enter commits the selection, and
   * arrows plus Page/Home/End move the cursor.
   */
  @Override
  public ScreenView handleKeyPress(KeyPress keyPress) {
    switch (keyPress.getType()) {
      case Escape:
        return modeScreenPresenter.transitionToNextScreen(false);
      case Enter:
        return modeScreenPresenter.transitionToNextScreen(true);
      case ArrowUp:
        modeScreenPresenter.arrowUp();
        return this;
      case ArrowDown:
        modeScreenPresenter.arrowDown();
        return this;
      case PageUp:
      case Home:
        modeScreenPresenter.pageUp();
        return this;
      case PageDown:
      case End:
        modeScreenPresenter.pageDown();
        return this;
      default:
        break;
    }
    if (keyPress.getType() == KeyPress.Type.Character) {
      assert keyPress.getCharacter() != null;
      if (keyPress.getCharacter() == 'q') {
        return modeScreenPresenter.transitionToNextScreen(false);
      }
    }
    return this;
  }

  /** Paints the screen description plus one aligned row per mode. */
  public void showModeScreen(Mode currentMode, List<Mode> modes, int currentPosition,
    int modeHeaderMaxLength, int modeDescriptionMaxLength) {
    showScreenDescription(currentMode);
    for (int i = 0; i < modes.size(); i++) {
      showMode(i, modes.get(i), i == currentPosition, modeHeaderMaxLength,
        modeDescriptionMaxLength);
    }
  }

  private void showScreenDescription(Mode currentMode) {
    TerminalPrinter printer = getTerminalPrinter(SCREEN_DESCRIPTION_START_ROW);
    printer.startBold().print("Mode Management").stopBold().endOfLine();
    printer.print("Current mode: ")
      .startBold().print(currentMode.getHeader()).stopBold().endOfLine();
    printer.print("Select mode followed by <Enter>").endOfLine();
  }

  /** Renders one mode row; the selected row's header is highlighted. */
  public void showMode(int pos, Mode mode, boolean selected, int modeHeaderMaxLength,
    int modeDescriptionMaxLength) {
    String paddedHeader = leftJustify(mode.getHeader(), modeHeaderMaxLength);
    String paddedDescription = leftJustify(mode.getDescription(), modeDescriptionMaxLength);
    TerminalPrinter printer = getTerminalPrinter(MODE_START_ROW + pos);
    if (selected) {
      printer.startHighlight().print(paddedHeader).stopHighlight()
        .printFormat(" = %s", paddedDescription).endOfLine();
    } else {
      printer.printFormat("%s = %s", paddedHeader, paddedDescription).endOfLine();
    }
  }

  // Pads value with trailing spaces up to width characters.
  private static String leftJustify(String value, int width) {
    return String.format("%-" + width + "s", value);
  }
}

View File

@ -0,0 +1,53 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.top;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.RecordFilter;
import org.apache.hadoop.hbase.hbtop.screen.ScreenView;
/**
 * The presentation logic for the filter display mode.
 */
@InterfaceAudience.Private
public class FilterDisplayModeScreenPresenter {

  private final FilterDisplayModeScreenView filterDisplayModeScreenView;
  // Immutable snapshot of the filters active when this screen was opened.
  private final List<RecordFilter> filters;
  private final ScreenView nextScreenView;

  public FilterDisplayModeScreenPresenter(FilterDisplayModeScreenView filterDisplayModeScreenView,
    List<RecordFilter> filters, ScreenView nextScreenView) {
    this.filterDisplayModeScreenView = Objects.requireNonNull(filterDisplayModeScreenView);
    List<RecordFilter> snapshot = new ArrayList<>(Objects.requireNonNull(filters));
    this.filters = Collections.unmodifiableList(snapshot);
    this.nextScreenView = Objects.requireNonNull(nextScreenView);
  }

  /** Paints the active filters and flushes them to the terminal. */
  public void init() {
    filterDisplayModeScreenView.showFilters(filters);
    filterDisplayModeScreenView.refreshTerminal();
  }

  /** @return the screen to resume once the user dismisses this mode */
  public ScreenView returnToNextScreen() {
    return nextScreenView;
  }
}

View File

@ -0,0 +1,77 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.top;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.RecordFilter;
import org.apache.hadoop.hbase.hbtop.screen.AbstractScreenView;
import org.apache.hadoop.hbase.hbtop.screen.Screen;
import org.apache.hadoop.hbase.hbtop.screen.ScreenView;
import org.apache.hadoop.hbase.hbtop.terminal.KeyPress;
import org.apache.hadoop.hbase.hbtop.terminal.Terminal;
/**
 * The filter display mode in the top screen.
 *
 * Exit if Enter key is pressed.
 */
@InterfaceAudience.Private
public class FilterDisplayModeScreenView extends AbstractScreenView {

  private final int row;
  private final FilterDisplayModeScreenPresenter filterDisplayModeScreenPresenter;

  public FilterDisplayModeScreenView(Screen screen, Terminal terminal, int row,
    List<RecordFilter> filters, ScreenView nextScreenView) {
    super(screen, terminal);
    this.row = row;
    this.filterDisplayModeScreenPresenter =
      new FilterDisplayModeScreenPresenter(this, filters, nextScreenView);
  }

  @Override
  public void init() {
    filterDisplayModeScreenPresenter.init();
  }

  /** Only Enter leaves this mode; every other key is ignored. */
  @Override
  public ScreenView handleKeyPress(KeyPress keyPress) {
    if (keyPress.getType() == KeyPress.Type.Enter) {
      return filterDisplayModeScreenPresenter.returnToNextScreen();
    }
    return this;
  }

  /**
   * Paints the active filters as 'filter1' + 'filter2' + ..., or "none" when
   * no filter is set.
   */
  public void showFilters(List<RecordFilter> filters) {
    String filtersString = "none";
    if (!filters.isEmpty()) {
      StringBuilder joined = new StringBuilder();
      for (RecordFilter filter : filters) {
        if (joined.length() > 0) {
          joined.append(" + ");
        }
        joined.append(String.format("'%s'", filter));
      }
      filtersString = joined.toString();
    }
    getTerminalPrinter(row).startBold().print("<Enter> to resume, filters: " + filtersString)
      .stopBold().endOfLine();
  }
}

View File

@ -0,0 +1,48 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.top;
import java.util.Objects;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.field.Field;
/**
 * Represents headers for the metrics in the top screen.
 */
@InterfaceAudience.Private
public class Header {

  private final Field field;
  // Display width of this column, in characters.
  private final int length;

  public Header(Field field, int length) {
    this.field = Objects.requireNonNull(field);
    this.length = length;
  }

  /**
   * Builds a {@link String#format} conversion for this column, e.g. {@code "%-10s"}
   * for a left-justified field of width 10, or {@code "%10s"} otherwise.
   */
  public String format() {
    StringBuilder formatString = new StringBuilder("%");
    if (field.isLeftJustify()) {
      formatString.append('-');
    }
    return formatString.append(length).append('s').toString();
  }

  public Field getField() {
    return field;
  }

  public int getLength() {
    return length;
  }
}

View File

@ -0,0 +1,168 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.top;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.screen.ScreenView;
/**
 * The presentation logic for the input mode.
 *
 * Maintains an editable input line with a cursor, plus a read-only history list
 * that can be browsed with the up/down arrow keys.
 */
@InterfaceAudience.Private
public class InputModeScreenPresenter {

  /** Callback invoked with the final input string when the user presses Enter. */
  public interface ResultListener {
    public ScreenView apply(String inputString);
  }

  private final InputModeScreenView inputModeScreenView;
  private final String message;
  // Immutable snapshot of previous inputs; empty when no history was given.
  private final List<String> histories;
  private final ResultListener resultListener;

  private StringBuilder inputString = new StringBuilder();
  private int cursorPosition;
  // -1 means "not browsing the history"; otherwise an index into histories.
  private int historyPosition = -1;

  public InputModeScreenPresenter(InputModeScreenView inputModeScreenView, String message,
    @Nullable List<String> histories, ResultListener resultListener) {
    this.inputModeScreenView = Objects.requireNonNull(inputModeScreenView);
    this.message = Objects.requireNonNull(message);
    if (histories != null) {
      this.histories = Collections.unmodifiableList(new ArrayList<>(histories));
    } else {
      this.histories = Collections.emptyList();
    }
    this.resultListener = Objects.requireNonNull(resultListener);
  }

  public void init() {
    redraw();
  }

  /** Finishes the input mode and hands the typed string to the listener. */
  public ScreenView returnToNextScreen() {
    inputModeScreenView.hideCursor();
    String result = inputString.toString();
    return resultListener.apply(result);
  }

  /** Inserts a character at the cursor and advances the cursor. */
  public void character(Character character) {
    inputString.insert(cursorPosition, character);
    cursorPosition += 1;
    redraw();
  }

  /** Deletes the character before the cursor, if any. */
  public void backspace() {
    if (cursorPosition == 0) {
      return;
    }
    inputString.deleteCharAt(cursorPosition - 1);
    cursorPosition -= 1;
    redraw();
  }

  /** Deletes the character under the cursor, if any. */
  public void delete() {
    if (inputString.length() == 0 || cursorPosition >= inputString.length()) {
      return;
    }
    inputString.deleteCharAt(cursorPosition);
    redraw();
  }

  public void arrowLeft() {
    if (cursorPosition == 0) {
      return;
    }
    cursorPosition -= 1;
    redraw();
  }

  public void arrowRight() {
    if (cursorPosition >= inputString.length()) {
      return;
    }
    cursorPosition += 1;
    redraw();
  }

  public void home() {
    cursorPosition = 0;
    redraw();
  }

  public void end() {
    cursorPosition = inputString.length();
    redraw();
  }

  /** Replaces the input with the previous (older) history entry, if any. */
  public void arrowUp() {
    if (historyPosition == 0 || histories.isEmpty()) {
      return;
    }
    if (historyPosition == -1) {
      historyPosition = histories.size() - 1;
    } else {
      historyPosition -= 1;
    }
    inputString = new StringBuilder(histories.get(historyPosition));
    cursorPosition = inputString.length();
    redraw();
  }

  /** Replaces the input with the next (newer) history entry, or clears it past the newest. */
  public void arrowDown() {
    if (historyPosition == -1 || histories.isEmpty()) {
      return;
    }
    if (historyPosition == histories.size() - 1) {
      historyPosition = -1;
      inputString = new StringBuilder();
    } else {
      historyPosition += 1;
      inputString = new StringBuilder(histories.get(historyPosition));
    }
    cursorPosition = inputString.length();
    redraw();
  }

  // Repaints the input line and moves the terminal cursor; shared by every
  // editing operation (was previously duplicated in each method).
  private void redraw() {
    inputModeScreenView.showInput(message, inputString.toString(), cursorPosition);
    inputModeScreenView.refreshTerminal();
  }
}

View File

@ -0,0 +1,105 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.top;
import java.util.List;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.screen.AbstractScreenView;
import org.apache.hadoop.hbase.hbtop.screen.Screen;
import org.apache.hadoop.hbase.hbtop.screen.ScreenView;
import org.apache.hadoop.hbase.hbtop.terminal.KeyPress;
import org.apache.hadoop.hbase.hbtop.terminal.Terminal;
/**
* The input mode in the top screen.
*/
@InterfaceAudience.Private
public class InputModeScreenView extends AbstractScreenView {
private final int row;
private final InputModeScreenPresenter inputModeScreenPresenter;
public InputModeScreenView(Screen screen, Terminal terminal, int row, String message,
List<String> histories, InputModeScreenPresenter.ResultListener resultListener) {
super(screen, terminal);
this.row = row;
this.inputModeScreenPresenter = new InputModeScreenPresenter(this, message, histories,
resultListener);
}
@Override
public void init() {
inputModeScreenPresenter.init();
}
@Override
public ScreenView handleKeyPress(KeyPress keyPress) {
switch (keyPress.getType()) {
case Enter:
return inputModeScreenPresenter.returnToNextScreen();
case Character:
inputModeScreenPresenter.character(keyPress.getCharacter());
break;
case Backspace:
inputModeScreenPresenter.backspace();
break;
case Delete:
inputModeScreenPresenter.delete();
break;
case ArrowLeft:
inputModeScreenPresenter.arrowLeft();
break;
case ArrowRight:
inputModeScreenPresenter.arrowRight();
break;
case Home:
inputModeScreenPresenter.home();
break;
case End:
inputModeScreenPresenter.end();
break;
case ArrowUp:
inputModeScreenPresenter.arrowUp();
break;
case ArrowDown:
inputModeScreenPresenter.arrowDown();
break;
default:
break;
}
return this;
}
public void showInput(String message, String inputString, int cursorPosition) {
getTerminalPrinter(row).startBold().print(message).stopBold().print(" ").print(inputString)
.endOfLine();
setCursorPosition(message.length() + 1 + cursorPosition, row);
refreshTerminal();
}
}

View File

@ -0,0 +1,51 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.top;
import java.util.Objects;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.screen.ScreenView;
/**
 * The presentation logic for the message mode.
 *
 * Exit after 2 seconds or if any key is pressed.
 */
@InterfaceAudience.Private
public class MessageModeScreenPresenter {

  private final MessageModeScreenView messageModeScreenView;
  private final String message;
  private final ScreenView nextScreenView;

  public MessageModeScreenPresenter(MessageModeScreenView messageModeScreenView, String message,
    ScreenView nextScreenView) {
    this.messageModeScreenView = Objects.requireNonNull(messageModeScreenView);
    this.message = Objects.requireNonNull(message);
    this.nextScreenView = Objects.requireNonNull(nextScreenView);
  }

  /** Paints the message and flushes it to the terminal. */
  public void init() {
    messageModeScreenView.showMessage(message);
    messageModeScreenView.refreshTerminal();
  }

  /** @return the screen to go back to once the message has been dismissed */
  public ScreenView returnToNextScreen() {
    return nextScreenView;
  }
}

View File

@ -0,0 +1,65 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.top;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.screen.AbstractScreenView;
import org.apache.hadoop.hbase.hbtop.screen.Screen;
import org.apache.hadoop.hbase.hbtop.screen.ScreenView;
import org.apache.hadoop.hbase.hbtop.terminal.KeyPress;
import org.apache.hadoop.hbase.hbtop.terminal.Terminal;
/**
 * The message mode in the top screen.
 */
@InterfaceAudience.Private
public class MessageModeScreenView extends AbstractScreenView {

  // How long the message stays up before auto-dismissing, in milliseconds.
  private static final int MESSAGE_TIMEOUT_MILLIS = 2000;

  private final int row;
  private final MessageModeScreenPresenter messageModeScreenPresenter;

  public MessageModeScreenView(Screen screen, Terminal terminal, int row, String message,
    ScreenView nextScreenView) {
    super(screen, terminal);
    this.row = row;
    this.messageModeScreenPresenter =
      new MessageModeScreenPresenter(this, message, nextScreenView);
  }

  @Override
  public void init() {
    messageModeScreenPresenter.init();
    setTimer(MESSAGE_TIMEOUT_MILLIS);
  }

  /** Auto-dismiss: the timer fired, so return to the previous screen. */
  @Override
  public ScreenView handleTimer() {
    return messageModeScreenPresenter.returnToNextScreen();
  }

  /** Any key press dismisses the message immediately. */
  @Override
  public ScreenView handleKeyPress(KeyPress keyPress) {
    cancelTimer();
    return messageModeScreenPresenter.returnToNextScreen();
  }

  /** Paints the message highlighted, padded with one space on each side. */
  public void showMessage(String message) {
    getTerminalPrinter(row).startHighlight().print(" ").print(message).print(" ").stopHighlight()
      .endOfLine();
  }
}

View File

@ -0,0 +1,151 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.top;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
* Utility class for paging for the metrics.
*/
@InterfaceAudience.Private
public class Paging {

  // Index of the record the cursor is on, within the whole record list.
  private int cursor;
  // First record index of the visible window (inclusive).
  private int windowStart;
  // One past the last visible record index (exclusive).
  private int windowEnd;
  // Number of rows the window can show.
  private int windowSize;
  // Total number of records being paged over.
  private int totalRecords;

  /** Resets the cursor and the window to the top. */
  public void init() {
    cursor = 0;
    windowStart = 0;
    windowEnd = Math.min(windowSize, totalRecords);
  }

  /** Updates the window size and re-clamps the window accordingly. */
  public void updatePageSize(int pageSize) {
    windowSize = pageSize;
    if (pageSize == 0) {
      windowStart = 0;
      windowEnd = 0;
    } else {
      windowEnd = windowStart + pageSize;
      keepConsistent();
    }
  }

  /** Updates the record count, keeping the cursor and window valid. */
  public void updateRecordsSize(int recordsSize) {
    if (totalRecords == 0) {
      // Previously empty: start over from the top.
      cursor = 0;
      windowStart = 0;
      windowEnd = Math.min(windowSize, recordsSize);
      totalRecords = recordsSize;
    } else if (recordsSize == 0) {
      // Now empty: collapse the window.
      cursor = 0;
      windowStart = 0;
      windowEnd = 0;
      totalRecords = 0;
    } else {
      totalRecords = recordsSize;
      if (windowSize > 0) {
        windowEnd = windowStart + windowSize;
        keepConsistent();
      }
    }
  }

  /** Moves the cursor up one record. */
  public void arrowUp() {
    if (cursor <= 0) {
      return;
    }
    cursor--;
    if (windowSize > 0) {
      keepConsistent();
    }
  }

  /** Moves the cursor down one record. */
  public void arrowDown() {
    if (cursor >= totalRecords - 1) {
      return;
    }
    cursor++;
    if (windowSize > 0) {
      keepConsistent();
    }
  }

  /** Moves the cursor up one window. */
  public void pageUp() {
    if (windowSize <= 0 || cursor <= 0) {
      return;
    }
    cursor = Math.max(0, cursor - windowSize);
    keepConsistent();
  }

  /** Moves the cursor down one window; the cursor lands at the top of the new window. */
  public void pageDown() {
    if (windowSize <= 0 || cursor >= totalRecords - 1) {
      return;
    }
    cursor = Math.min(cursor + windowSize, totalRecords - 1);
    windowStart = cursor;
    windowEnd = windowStart + windowSize;
    keepConsistent();
  }

  // Re-establishes the invariants: the cursor lies inside [windowStart, windowEnd)
  // and the window lies inside [0, totalRecords]. The order of the clamps below
  // is significant and mirrors the original logic exactly.
  private void keepConsistent() {
    if (cursor < windowStart) {
      windowStart = cursor;
      windowEnd = windowStart + windowSize;
    } else if (cursor > totalRecords - 1) {
      cursor = totalRecords - 1;
      windowEnd = totalRecords;
      windowStart = windowEnd - windowSize;
    } else if (cursor > windowEnd - 1) {
      windowEnd = cursor + 1;
      windowStart = windowEnd - windowSize;
    }
    if (windowStart < 0) {
      windowStart = 0;
    }
    if (windowEnd > totalRecords) {
      windowEnd = totalRecords;
      windowStart = Math.max(0, windowEnd - windowSize);
    }
  }

  public int getCurrentPosition() {
    return cursor;
  }

  public int getPageStartPosition() {
    return windowStart;
  }

  public int getPageEndPosition() {
    return windowEnd;
  }
}

View File

@ -0,0 +1,93 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.top;
import java.util.Objects;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
* Represents the summary of the metrics.
*/
@InterfaceAudience.Private
public class Summary {

  private final String currentTime;
  private final String version;
  private final String clusterId;
  private final int servers;
  private final int liveServers;
  private final int deadServers;
  private final int regionCount;
  // "rit" presumably abbreviates regions-in-transition (HBase convention) — confirm upstream.
  private final int ritCount;
  private final double averageLoad;
  private final long aggregateRequestPerSecond;

  /**
   * @throws NullPointerException if currentTime, version or clusterId is null
   */
  public Summary(String currentTime, String version, String clusterId, int servers,
    int liveServers, int deadServers, int regionCount, int ritCount, double averageLoad,
    long aggregateRequestPerSecond) {
    // The three string fields are mandatory; the numeric fields are plain copies.
    this.currentTime = Objects.requireNonNull(currentTime);
    this.version = Objects.requireNonNull(version);
    this.clusterId = Objects.requireNonNull(clusterId);
    this.servers = servers;
    this.liveServers = liveServers;
    this.deadServers = deadServers;
    this.regionCount = regionCount;
    this.ritCount = ritCount;
    this.averageLoad = averageLoad;
    this.aggregateRequestPerSecond = aggregateRequestPerSecond;
  }

  public String getCurrentTime() {
    return currentTime;
  }

  public String getVersion() {
    return version;
  }

  public String getClusterId() {
    return clusterId;
  }

  public int getServers() {
    return servers;
  }

  public int getLiveServers() {
    return liveServers;
  }

  public int getDeadServers() {
    return deadServers;
  }

  public int getRegionCount() {
    return regionCount;
  }

  public int getRitCount() {
    return ritCount;
  }

  public double getAverageLoad() {
    return averageLoad;
  }

  public long getAggregateRequestPerSecond() {
    return aggregateRequestPerSecond;
  }
}

View File

@ -0,0 +1,235 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.top;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Objects;
import org.apache.commons.lang3.time.DateFormatUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.hbtop.Record;
import org.apache.hadoop.hbase.hbtop.RecordFilter;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.hbtop.field.FieldInfo;
import org.apache.hadoop.hbase.hbtop.field.FieldValue;
import org.apache.hadoop.hbase.hbtop.mode.DrillDownInfo;
import org.apache.hadoop.hbase.hbtop.mode.Mode;
/**
 * The data and business logic for the top screen.
 *
 * Holds the currently selected {@link Mode}, sort field, active filters and the most
 * recently fetched metrics ({@link Summary} and {@link Record}s), and refreshes them
 * from {@link Admin#getClusterStatus()} on demand.
 */
@InterfaceAudience.Private
public class TopScreenModel {

  private static final Log LOG = LogFactory.getLog(TopScreenModel.class);

  private final Admin admin;

  // The current display mode; it determines which fields exist and how records are produced.
  private Mode currentMode;
  private Field currentSortField;
  private List<FieldInfo> fieldInfos;
  private List<Field> fields;

  // Latest metrics snapshot, rebuilt by refreshMetricsData().
  private Summary summary;
  private List<Record> records;

  // Active record filters, plus the history of filter strings the user has entered.
  private final List<RecordFilter> filters = new ArrayList<>();
  private final List<String> filterHistories = new ArrayList<>();

  // false = descending sort (the default after every mode switch).
  private boolean ascendingSort;

  public TopScreenModel(Admin admin, Mode initialMode) {
    this.admin = Objects.requireNonNull(admin);
    switchMode(Objects.requireNonNull(initialMode), null, false);
  }

  /**
   * Switch to another display mode.
   *
   * Rebuilds the field list from the new mode, resets the sort field to the mode's default
   * (unless the current sort field also exists in the new mode and
   * keepSortFieldAndSortOrderIfPossible is true), clears the existing filters, and installs
   * the given initial filters (may be null for none).
   */
  public void switchMode(Mode nextMode, List<RecordFilter> initialFilters,
    boolean keepSortFieldAndSortOrderIfPossible) {
    currentMode = nextMode;
    fieldInfos = Collections.unmodifiableList(new ArrayList<>(currentMode.getFieldInfos()));
    fields = new ArrayList<>();
    for (FieldInfo fieldInfo : currentMode.getFieldInfos()) {
      fields.add(fieldInfo.getField());
    }
    fields = Collections.unmodifiableList(fields);
    if (keepSortFieldAndSortOrderIfPossible) {
      // Keep the current sort field only if the new mode also has it.
      boolean match = false;
      for (Field field : fields) {
        if (field == currentSortField) {
          match = true;
          break;
        }
      }
      if (!match) {
        currentSortField = nextMode.getDefaultSortField();
        ascendingSort = false;
      }
    } else {
      currentSortField = nextMode.getDefaultSortField();
      ascendingSort = false;
    }
    clearFilters();
    if (initialFilters != null) {
      filters.addAll(initialFilters);
    }
  }

  /** Replaces the sort field and the visible field list (used by the field screen). */
  public void setSortFieldAndFields(Field sortField, List<Field> fields) {
    this.currentSortField = sortField;
    this.fields = Collections.unmodifiableList(new ArrayList<>(fields));
  }

  /*
   * HBTop only calls this from a single thread, and if that ever changes, this needs
   * synchronization
   */
  public void refreshMetricsData() {
    ClusterStatus clusterStatus;
    try {
      clusterStatus = admin.getClusterStatus();
    } catch (Exception e) {
      // Keep showing the previous snapshot if the cluster status cannot be fetched.
      LOG.error("Unable to get cluster status", e);
      return;
    }
    refreshSummary(clusterStatus);
    refreshRecords(clusterStatus);
  }

  /** Rebuilds the cluster-wide {@link Summary} from the given cluster status. */
  private void refreshSummary(ClusterStatus clusterStatus) {
    String currentTime = DateFormatUtils.ISO_8601_EXTENDED_TIME_FORMAT
      .format(System.currentTimeMillis());
    String version = clusterStatus.getHBaseVersion();
    String clusterId = clusterStatus.getClusterId();
    int liveServers = clusterStatus.getServersSize();
    int deadServers = clusterStatus.getDeadServerNames().size();
    int regionCount = clusterStatus.getRegionsCount();
    int ritCount = clusterStatus.getRegionsInTransition().size();
    double averageLoad = clusterStatus.getAverageLoad();
    long aggregateRequestPerSecond = 0;
    // Sum the request figures reported by every live server.
    for (ServerName sn: clusterStatus.getServers()) {
      ServerLoad sl = clusterStatus.getLoad(sn);
      aggregateRequestPerSecond += sl.getNumberOfRequests();
    }
    summary = new Summary(currentTime, version, clusterId, liveServers + deadServers,
      liveServers, deadServers, regionCount, ritCount, averageLoad, aggregateRequestPerSecond);
  }

  /** Rebuilds the record list: applies the active filters, then sorts by the sort field. */
  private void refreshRecords(ClusterStatus clusterStatus) {
    // Filter
    List<Record> records = new ArrayList<>();
    for (Record record : currentMode.getRecords(clusterStatus)) {
      // A record is dropped as soon as any one filter rejects it.
      boolean filter = false;
      for (RecordFilter recordFilter : filters) {
        if (!recordFilter.execute(record)) {
          filter = true;
          break;
        }
      }
      if (!filter) {
        records.add(record);
      }
    }
    // Sort
    Collections.sort(records, new Comparator<Record>() {
      @Override
      public int compare(Record recordLeft, Record recordRight) {
        FieldValue left = recordLeft.get(currentSortField);
        FieldValue right = recordRight.get(currentSortField);
        // ascendingSort == false (the default) yields a descending order.
        return (ascendingSort ? 1 : -1) * left.compareTo(right);
      }
    });
    this.records = Collections.unmodifiableList(records);
  }

  /** Toggles between ascending and descending sort order. */
  public void switchSortOrder() {
    ascendingSort = !ascendingSort;
  }

  /**
   * Parses and adds a filter.
   *
   * @return false when the filter expression is unparsable (nothing is added), true otherwise
   */
  public boolean addFilter(String filterString, boolean ignoreCase) {
    RecordFilter filter = RecordFilter.parse(filterString, fields, ignoreCase);
    if (filter == null) {
      return false;
    }
    filters.add(filter);
    filterHistories.add(filterString);
    return true;
  }

  /** Removes all active filters (the filter history is kept). */
  public void clearFilters() {
    filters.clear();
  }

  /**
   * Drills down into the given record, switching to the mode it leads to.
   *
   * @return false when the current mode has no drill-down for the record, true otherwise
   */
  public boolean drillDown(Record selectedRecord) {
    DrillDownInfo drillDownInfo = currentMode.drillDown(selectedRecord);
    if (drillDownInfo == null) {
      return false;
    }
    switchMode(drillDownInfo.getNextMode(), drillDownInfo.getInitialFilters(), true);
    return true;
  }

  public Mode getCurrentMode() {
    return currentMode;
  }

  public Field getCurrentSortField() {
    return currentSortField;
  }

  public List<FieldInfo> getFieldInfos() {
    return fieldInfos;
  }

  public List<Field> getFields() {
    return fields;
  }

  public Summary getSummary() {
    return summary;
  }

  public List<Record> getRecords() {
    return records;
  }

  public List<RecordFilter> getFilters() {
    return Collections.unmodifiableList(filters);
  }

  public List<String> getFilterHistories() {
    return Collections.unmodifiableList(filterHistories);
  }
}

View File

@ -0,0 +1,356 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.top;
import java.util.ArrayList;
import java.util.EnumMap;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.Record;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.hbtop.field.FieldInfo;
import org.apache.hadoop.hbase.hbtop.mode.Mode;
import org.apache.hadoop.hbase.hbtop.screen.Screen;
import org.apache.hadoop.hbase.hbtop.screen.ScreenView;
import org.apache.hadoop.hbase.hbtop.screen.field.FieldScreenPresenter;
import org.apache.hadoop.hbase.hbtop.screen.field.FieldScreenView;
import org.apache.hadoop.hbase.hbtop.screen.help.HelpScreenView;
import org.apache.hadoop.hbase.hbtop.screen.mode.ModeScreenPresenter;
import org.apache.hadoop.hbase.hbtop.screen.mode.ModeScreenView;
import org.apache.hadoop.hbase.hbtop.terminal.Terminal;
import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize;
/**
 * The presentation logic for the top screen.
 *
 * Mediates between {@link TopScreenView} (rendering/terminal) and {@link TopScreenModel}
 * (data), and owns the view-only state: paging position, horizontal scroll offset, which
 * fields are displayed and how wide each field column is.
 */
@InterfaceAudience.Private
public class TopScreenPresenter {

  private final TopScreenView topScreenView;

  // Milliseconds between automatic refreshes; atomic because it can be changed from the
  // input-mode screen's listener.
  private final AtomicLong refreshDelay;
  private long lastRefreshTimestamp;

  // When set, the next refresh recomputes auto-adjusted column widths.
  private final AtomicBoolean adjustFieldLength = new AtomicBoolean(true);

  private final TopScreenModel topScreenModel;

  // Current terminal width in columns; limits how many headers fit on screen.
  private int terminalLength;

  // Index of the first displayed field (incremented/decremented by arrow left/right).
  private int horizontalScroll;

  private final Paging paging = new Paging();

  // Per-field view state: whether the field is shown, and its column width.
  private final EnumMap<Field, Boolean> fieldDisplayMap = new EnumMap<>(Field.class);
  private final EnumMap<Field, Integer> fieldLengthMap = new EnumMap<>(Field.class);

  public TopScreenPresenter(TopScreenView topScreenView, long initialRefreshDelay,
    TopScreenModel topScreenModel) {
    this.topScreenView = Objects.requireNonNull(topScreenView);
    this.refreshDelay = new AtomicLong(initialRefreshDelay);
    this.topScreenModel = Objects.requireNonNull(topScreenModel);
    initFieldDisplayMapAndFieldLengthMap();
  }

  /** Initializes terminal-dependent state; call once before the first refresh. */
  public void init() {
    terminalLength = topScreenView.getTerminalSize().getColumns();
    paging.updatePageSize(topScreenView.getPageSize());
    topScreenView.hideCursor();
  }

  /**
   * Refreshes the metrics and redraws the screen.
   *
   * @param force when false, skips the refresh if the configured delay has not yet elapsed
   * @return the number of milliseconds until the next scheduled refresh
   */
  public long refresh(boolean force) {
    if (!force) {
      long delay = System.currentTimeMillis() - lastRefreshTimestamp;
      if (delay < refreshDelay.get()) {
        // Too early; tell the caller how long to wait.
        return refreshDelay.get() - delay;
      }
    }
    // Re-read terminal dimensions if the terminal was resized.
    TerminalSize newTerminalSize = topScreenView.doResizeIfNecessary();
    if (newTerminalSize != null) {
      terminalLength = newTerminalSize.getColumns();
      paging.updatePageSize(topScreenView.getPageSize());
      topScreenView.clearTerminal();
    }
    topScreenModel.refreshMetricsData();
    paging.updateRecordsSize(topScreenModel.getRecords().size());
    adjustFieldLengthIfNeeded();
    topScreenView.showTopScreen(topScreenModel.getSummary(), getDisplayedHeaders(),
      getDisplayedRecords(), getSelectedRecord());
    topScreenView.refreshTerminal();
    lastRefreshTimestamp = System.currentTimeMillis();
    return refreshDelay.get();
  }

  /** Requests a column-width recalculation and redraws immediately. */
  public void adjustFieldLength() {
    adjustFieldLength.set(true);
    refresh(true);
  }

  /**
   * If an adjustment was requested, widens each auto-adjustable column to fit its longest
   * value (but never narrower than the header text).
   */
  private void adjustFieldLengthIfNeeded() {
    if (adjustFieldLength.get()) {
      adjustFieldLength.set(false);
      for (Field f : topScreenModel.getFields()) {
        if (f.isAutoAdjust()) {
          int maxLength = 0;
          for (Record record : topScreenModel.getRecords()) {
            int length = record.get(f).asString().length();
            maxLength = Math.max(length, maxLength);
          }
          fieldLengthMap.put(f, Math.max(maxLength, f.getHeader().length()));
        }
      }
    }
  }

  /**
   * Builds the list of headers that fit on screen, starting at the horizontal scroll offset
   * and stopping once the accumulated column widths exceed the terminal width.
   */
  private List<Header> getDisplayedHeaders() {
    List<Field> displayFields = new ArrayList<>();
    for (Field field : topScreenModel.getFields()) {
      if (fieldDisplayMap.get(field)) {
        displayFields.add(field);
      }
    }
    // Clamp the scroll offset to the number of displayable fields.
    if (displayFields.isEmpty()) {
      horizontalScroll = 0;
    } else if (horizontalScroll > displayFields.size() - 1) {
      horizontalScroll = displayFields.size() - 1;
    }
    List<Header> ret = new ArrayList<>();
    int length = 0;
    for (int i = horizontalScroll; i < displayFields.size(); i++) {
      Field field = displayFields.get(i);
      int fieldLength = fieldLengthMap.get(field);
      length += fieldLength + 1; // +1 for the column separator
      if (length > terminalLength) {
        break;
      }
      ret.add(new Header(field, fieldLength));
    }
    return ret;
  }

  /** Returns the slice of records belonging to the current page. */
  private List<Record> getDisplayedRecords() {
    List<Record> ret = new ArrayList<>();
    for (int i = paging.getPageStartPosition(); i < paging.getPageEndPosition(); i++) {
      ret.add(topScreenModel.getRecords().get(i));
    }
    return ret;
  }

  /** Returns the record under the cursor, or null when there are no records. */
  private Record getSelectedRecord() {
    if (topScreenModel.getRecords().isEmpty()) {
      return null;
    }
    return topScreenModel.getRecords().get(paging.getCurrentPosition());
  }

  public void arrowUp() {
    paging.arrowUp();
    refresh(true);
  }

  public void arrowDown() {
    paging.arrowDown();
    refresh(true);
  }

  public void pageUp() {
    paging.pageUp();
    refresh(true);
  }

  public void pageDown() {
    paging.pageDown();
    refresh(true);
  }

  /** Scrolls one field to the left (towards the first column). */
  public void arrowLeft() {
    if (horizontalScroll > 0) {
      horizontalScroll -= 1;
    }
    refresh(true);
  }

  /** Scrolls one field to the right (towards the last column). */
  public void arrowRight() {
    if (horizontalScroll < getHeaderSize() - 1) {
      horizontalScroll += 1;
    }
    refresh(true);
  }

  /** Jumps back to the first column. */
  public void home() {
    if (horizontalScroll > 0) {
      horizontalScroll = 0;
    }
    refresh(true);
  }

  /** Jumps to the last column. */
  public void end() {
    int headerSize = getHeaderSize();
    horizontalScroll = headerSize == 0 ? 0 : headerSize - 1;
    refresh(true);
  }

  /** Returns the number of fields currently toggled on for display. */
  private int getHeaderSize() {
    int size = 0;
    for (Field field : topScreenModel.getFields()) {
      if (fieldDisplayMap.get(field)) {
        size++;
      }
    }
    return size;
  }

  public void switchSortOrder() {
    topScreenModel.switchSortOrder();
    refresh(true);
  }

  public ScreenView transitionToHelpScreen(Screen screen, Terminal terminal) {
    return new HelpScreenView(screen, terminal, refreshDelay.get(), topScreenView);
  }

  /** Opens the mode-selection screen; the chosen mode is applied via switchMode(). */
  public ScreenView transitionToModeScreen(Screen screen, Terminal terminal) {
    return new ModeScreenView(screen, terminal, topScreenModel.getCurrentMode(),
      new ModeScreenPresenter.ResultListener() {
        @Override
        public void accept(Mode mode) {
          switchMode(mode);
        }
      }, topScreenView);
  }

  /** Opens the field-selection screen; the result replaces sort field and display map. */
  public ScreenView transitionToFieldScreen(Screen screen, Terminal terminal) {
    return new FieldScreenView(screen, terminal,
      topScreenModel.getCurrentSortField(), topScreenModel.getFields(),
      fieldDisplayMap,
      new FieldScreenPresenter.ResultListener() {
        @Override
        public void accept(Field sortField, List<Field> fields,
          EnumMap<Field, Boolean> fieldDisplayMap) {
          topScreenModel.setSortFieldAndFields(sortField, fields);
          TopScreenPresenter.this.fieldDisplayMap.clear();
          TopScreenPresenter.this.fieldDisplayMap.putAll(fieldDisplayMap);
        }
      }, topScreenView);
  }

  /** Switches the model to the given mode and resets all view state. */
  private void switchMode(Mode nextMode) {
    topScreenModel.switchMode(nextMode, null, false);
    reset();
  }

  /** Drills down into the selected record; no-op when nothing is selected or possible. */
  public void drillDown() {
    Record selectedRecord = getSelectedRecord();
    if (selectedRecord == null) {
      return;
    }
    if (topScreenModel.drillDown(selectedRecord)) {
      reset();
      refresh(true);
    }
  }

  /** Resets paging, scrolling and field maps (used after a mode change or drill-down). */
  private void reset() {
    initFieldDisplayMapAndFieldLengthMap();
    adjustFieldLength.set(true);
    paging.init();
    horizontalScroll = 0;
    topScreenView.clearTerminal();
  }

  /** Seeds the display/length maps from the current mode's field defaults. */
  private void initFieldDisplayMapAndFieldLengthMap() {
    fieldDisplayMap.clear();
    fieldLengthMap.clear();
    for (FieldInfo fieldInfo : topScreenModel.getFieldInfos()) {
      fieldDisplayMap.put(fieldInfo.getField(), fieldInfo.isDisplayByDefault());
      fieldLengthMap.put(fieldInfo.getField(), fieldInfo.getDefaultLength());
    }
  }

  public ScreenView goToMessageMode(Screen screen, Terminal terminal, int row, String message) {
    return new MessageModeScreenView(screen, terminal, row, message, topScreenView);
  }

  /** Opens input mode to change the refresh delay (entered in seconds, stored in millis). */
  public ScreenView goToInputModeForRefreshDelay(final Screen screen, final Terminal terminal,
    final int row) {
    return new InputModeScreenView(screen, terminal, row,
      "Change refresh delay from " + (double) refreshDelay.get() / 1000 + " to", null,
      new InputModeScreenPresenter.ResultListener() {
        @Override
        public ScreenView apply(String inputString) {
          if (inputString.isEmpty()) {
            // Empty input means "keep the current delay".
            return topScreenView;
          }
          double delay;
          try {
            delay = Double.parseDouble(inputString);
          } catch (NumberFormatException e) {
            return goToMessageMode(screen, terminal, row, "Unacceptable floating point");
          }
          refreshDelay.set((long) (delay * 1000));
          return topScreenView;
        }
      });
  }

  /** Opens input mode to add a filter expression of the form [!]FLD?VAL. */
  public ScreenView goToInputModeForFilter(final Screen screen, final Terminal terminal,
    final int row, final boolean ignoreCase) {
    return new InputModeScreenView(screen, terminal, row,
      "add filter #" + (topScreenModel.getFilters().size() + 1) +
        " (" + (ignoreCase ? "ignoring case" : "case sensitive") + ") as: [!]FLD?VAL",
      topScreenModel.getFilterHistories(),
      new InputModeScreenPresenter.ResultListener() {
        @Override
        public ScreenView apply(String inputString) {
          if (inputString.isEmpty()) {
            return topScreenView;
          }
          if (!topScreenModel.addFilter(inputString, ignoreCase)) {
            return goToMessageMode(screen, terminal, row, "Unacceptable filter expression");
          }
          // A new filter changes the record set, so restart paging from the top.
          paging.init();
          return topScreenView;
        }
      });
  }

  /** Clears all filters and redraws from the first page. */
  public void clearFilters() {
    topScreenModel.clearFilters();
    paging.init();
    refresh(true);
  }

  public ScreenView goToFilterDisplayMode(Screen screen, Terminal terminal, int row) {
    return new FilterDisplayModeScreenView(screen, terminal, row, topScreenModel.getFilters(),
      topScreenView);
  }
}

View File

@ -0,0 +1,308 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.top;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.hbtop.Record;
import org.apache.hadoop.hbase.hbtop.mode.Mode;
import org.apache.hadoop.hbase.hbtop.screen.AbstractScreenView;
import org.apache.hadoop.hbase.hbtop.screen.Screen;
import org.apache.hadoop.hbase.hbtop.screen.ScreenView;
import org.apache.hadoop.hbase.hbtop.terminal.KeyPress;
import org.apache.hadoop.hbase.hbtop.terminal.Terminal;
import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter;
import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize;
/**
 * The screen that provides a dynamic real-time view for the HBase metrics.
 *
 * This shows the metric {@link Summary} and the metric {@link Record}s. The summary and the
 * metrics are updated periodically (3 seconds by default).
 */
@InterfaceAudience.Private
public class TopScreenView extends AbstractScreenView {

  // Fixed screen layout (row indices): summary block, one message row, header row, records.
  private static final int SUMMARY_START_ROW = 0;
  private static final int SUMMARY_ROW_NUM = 7;
  private static final int MESSAGE_ROW = 7;
  private static final int RECORD_HEADER_ROW = 8;
  private static final int RECORD_START_ROW = 9;

  private final TopScreenPresenter topScreenPresenter;

  // Number of record rows that fit below the summary/header area; see updatePageSize().
  private int pageSize;

  public TopScreenView(Screen screen, Terminal terminal, long initialRefreshDelay, Admin admin,
    Mode initialMode) {
    super(screen, terminal);
    this.topScreenPresenter = new TopScreenPresenter(this, initialRefreshDelay,
      new TopScreenModel(admin, initialMode));
  }

  @Override
  public void init() {
    topScreenPresenter.init();
    // Draw immediately, then schedule the next periodic refresh.
    long delay = topScreenPresenter.refresh(true);
    setTimer(delay);
  }

  @Override
  public ScreenView handleTimer() {
    long delay = topScreenPresenter.refresh(false);
    setTimer(delay);
    return this;
  }

  /**
   * Dispatches a key press to the presenter.
   *
   * @return the next screen view to display; {@code this} to stay on the top screen,
   *   another view for screen transitions, or null to exit
   */
  @Nullable
  @Override
  public ScreenView handleKeyPress(KeyPress keyPress) {
    // Navigation and refresh keys first.
    switch (keyPress.getType()) {
      case Enter:
        topScreenPresenter.refresh(true);
        return this;
      case ArrowUp:
        topScreenPresenter.arrowUp();
        return this;
      case ArrowDown:
        topScreenPresenter.arrowDown();
        return this;
      case ArrowLeft:
        topScreenPresenter.arrowLeft();
        return this;
      case ArrowRight:
        topScreenPresenter.arrowRight();
        return this;
      case PageUp:
        topScreenPresenter.pageUp();
        return this;
      case PageDown:
        topScreenPresenter.pageDown();
        return this;
      case Home:
        topScreenPresenter.home();
        return this;
      case End:
        topScreenPresenter.end();
        return this;
      case Escape:
        // Escape quits the tool.
        return null;
      default:
        // Do nothing
        break;
    }
    if (keyPress.getType() != KeyPress.Type.Character) {
      return unknownCommandMessage();
    }
    assert keyPress.getCharacter() != null;
    // Single-character commands. Transitions to other screens cancel the refresh timer;
    // the target screen re-arms it when control returns here.
    switch (keyPress.getCharacter()) {
      case 'R':
        topScreenPresenter.switchSortOrder();
        break;
      case 'f':
        cancelTimer();
        return topScreenPresenter.transitionToFieldScreen(getScreen(), getTerminal());
      case 'm':
        cancelTimer();
        return topScreenPresenter.transitionToModeScreen(getScreen(), getTerminal());
      case 'h':
        cancelTimer();
        return topScreenPresenter.transitionToHelpScreen(getScreen(), getTerminal());
      case 'd':
        cancelTimer();
        return topScreenPresenter.goToInputModeForRefreshDelay(getScreen(), getTerminal(),
          MESSAGE_ROW);
      case 'o':
        cancelTimer();
        if (keyPress.isCtrl()) {
          // Ctrl-o shows the active filters instead of adding one.
          return topScreenPresenter.goToFilterDisplayMode(getScreen(), getTerminal(), MESSAGE_ROW);
        }
        return topScreenPresenter.goToInputModeForFilter(getScreen(), getTerminal(), MESSAGE_ROW,
          true);
      case 'O':
        cancelTimer();
        // Capital O adds a case-sensitive filter.
        return topScreenPresenter.goToInputModeForFilter(getScreen(), getTerminal(), MESSAGE_ROW,
          false);
      case '=':
        topScreenPresenter.clearFilters();
        break;
      case 'X':
        topScreenPresenter.adjustFieldLength();
        break;
      case 'i':
        topScreenPresenter.drillDown();
        break;
      case 'q':
        return null;
      default:
        return unknownCommandMessage();
    }
    return this;
  }

  @Override
  public TerminalSize getTerminalSize() {
    TerminalSize terminalSize = super.getTerminalSize();
    updatePageSize(terminalSize);
    return terminalSize;
  }

  @Override
  public TerminalSize doResizeIfNecessary() {
    TerminalSize terminalSize = super.doResizeIfNecessary();
    if (terminalSize == null) {
      return null;
    }
    updatePageSize(terminalSize);
    return terminalSize;
  }

  /** Recomputes how many record rows fit: total rows minus summary block and two fixed rows. */
  private void updatePageSize(TerminalSize terminalSize) {
    pageSize = terminalSize.getRows() - SUMMARY_ROW_NUM - 2;
    if (pageSize < 0) {
      pageSize = 0;
    }
  }

  public int getPageSize() {
    return pageSize;
  }

  /** Renders the complete top screen: summary, header row and the record page. */
  public void showTopScreen(Summary summary, List<Header> headers, List<Record> records,
    Record selectedRecord) {
    showSummary(summary);
    clearMessage();
    showHeaders(headers);
    showRecords(headers, records, selectedRecord);
  }

  /** Renders the 7-line summary block at the top of the screen. */
  private void showSummary(Summary summary) {
    TerminalPrinter printer = getTerminalPrinter(SUMMARY_START_ROW);
    printer.print(String.format("HBase hbtop - %s", summary.getCurrentTime())).endOfLine();
    printer.print(String.format("Version: %s", summary.getVersion())).endOfLine();
    printer.print(String.format("Cluster ID: %s", summary.getClusterId())).endOfLine();
    printer.print("RegionServer(s): ")
      .startBold().print(Integer.toString(summary.getServers())).stopBold()
      .print(" total, ")
      .startBold().print(Integer.toString(summary.getLiveServers())).stopBold()
      .print(" live, ")
      .startBold().print(Integer.toString(summary.getDeadServers())).stopBold()
      .print(" dead").endOfLine();
    printer.print("RegionCount: ")
      .startBold().print(Integer.toString(summary.getRegionCount())).stopBold()
      .print(" total, ")
      .startBold().print(Integer.toString(summary.getRitCount())).stopBold()
      .print(" rit").endOfLine();
    printer.print("Average Cluster Load: ")
      .startBold().print(String.format("%.2f", summary.getAverageLoad())).stopBold().endOfLine();
    printer.print("Aggregate Request/s: ")
      .startBold().print(Long.toString(summary.getAggregateRequestPerSecond())).stopBold()
      .endOfLine();
  }

  /**
   * Renders the record rows; the selected record is highlighted and rows beyond the record
   * count are cleared by printing empty lines.
   */
  private void showRecords(List<Header> headers, List<Record> records, Record selectedRecord) {
    TerminalPrinter printer = getTerminalPrinter(RECORD_START_ROW);
    List<String> buf = new ArrayList<>(headers.size());
    for (int i = 0; i < pageSize; i++) {
      if (i < records.size()) {
        Record record = records.get(i);
        buf.clear();
        for (Header header : headers) {
          String value = "";
          if (record.containsKey(header.getField())) {
            value = record.get(header.getField()).asString();
          }
          // Format each cell to its column width, truncating with a trailing '+'.
          buf.add(limitLineLength(String.format(header.format(), value), header.getLength()));
        }
        String recordString = StringUtils.join(buf, " ");
        if (!recordString.isEmpty()) {
          recordString += " ";
        }
        // Reference comparison is intentional: the selected record is the same instance.
        if (record == selectedRecord) {
          printer.startHighlight().print(recordString).stopHighlight().endOfLine();
        } else {
          printer.print(recordString).endOfLine();
        }
      } else {
        printer.endOfLine();
      }
    }
  }

  /** Renders the highlighted header row above the records. */
  private void showHeaders(List<Header> headers) {
    List<String> headerStrings = new ArrayList<>();
    for (Header header : headers) {
      headerStrings.add(String.format(header.format(), header.getField().getHeader()));
    }
    String header = StringUtils.join(headerStrings, " ");
    if (!header.isEmpty()) {
      header += " ";
    }
    getTerminalPrinter(RECORD_HEADER_ROW).startHighlight().print(header).stopHighlight()
      .endOfLine();
  }

  /** Truncates a line to the given length, marking the cut with a trailing '+'. */
  private String limitLineLength(String line, int length) {
    if (line.length() > length) {
      return line.substring(0, length - 1) + "+";
    }
    return line;
  }

  /** Blanks the message row. */
  private void clearMessage() {
    getTerminalPrinter(MESSAGE_ROW).print("").endOfLine();
  }

  /** Shows the "unknown command" hint on the message row (stops the refresh timer first). */
  private ScreenView unknownCommandMessage() {
    cancelTimer();
    return topScreenPresenter.goToMessageMode(getScreen(), getTerminal(), MESSAGE_ROW,
      "Unknown command - try 'h' for help");
  }
}

View File

@ -0,0 +1,69 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.terminal;
public abstract class AbstractTerminalPrinter implements TerminalPrinter {
@Override
public TerminalPrinter print(Object value) {
print(value.toString());
return this;
}
@Override
public TerminalPrinter print(char value) {
print(Character.toString(value));
return this;
}
@Override
public TerminalPrinter print(short value) {
print(Short.toString(value));
return this;
}
@Override
public TerminalPrinter print(int value) {
print(Integer.toString(value));
return this;
}
@Override
public TerminalPrinter print(long value) {
print(Long.toString(value));
return this;
}
@Override
public TerminalPrinter print(float value) {
print(Float.toString(value));
return this;
}
@Override
public TerminalPrinter print(double value) {
print(Double.toString(value));
return this;
}
@Override
public TerminalPrinter printFormat(String format, Object... args) {
print(String.format(format, args));
return this;
}
}

View File

@ -0,0 +1,128 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.terminal;
import java.util.Objects;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
 * The attributes of text in the terminal.
 *
 * A mutable bag of style flags (bold, blink, reverse, underline) plus foreground and
 * background {@link Color}s. {@link #reset()} restores the defaults: all flags off,
 * white on black.
 */
@InterfaceAudience.Private
public class Attributes {

  private boolean bold;
  private boolean blink;
  private boolean reverse;
  private boolean underline;
  private Color foregroundColor;
  private Color backgroundColor;

  /** Creates attributes initialized to the defaults (see {@link #reset()}). */
  public Attributes() {
    reset();
  }

  /** Copy constructor: takes every attribute from {@code attributes}. */
  public Attributes(Attributes attributes) {
    set(attributes);
  }

  public boolean isBold() {
    return bold;
  }

  public void setBold(boolean bold) {
    this.bold = bold;
  }

  public boolean isBlink() {
    return blink;
  }

  public void setBlink(boolean blink) {
    this.blink = blink;
  }

  public boolean isReverse() {
    return reverse;
  }

  public void setReverse(boolean reverse) {
    this.reverse = reverse;
  }

  public boolean isUnderline() {
    return underline;
  }

  public void setUnderline(boolean underline) {
    this.underline = underline;
  }

  public Color getForegroundColor() {
    return foregroundColor;
  }

  public void setForegroundColor(Color foregroundColor) {
    this.foregroundColor = foregroundColor;
  }

  public Color getBackgroundColor() {
    return backgroundColor;
  }

  public void setBackgroundColor(Color backgroundColor) {
    this.backgroundColor = backgroundColor;
  }

  /** Restores the default state: all style flags off, white foreground on black background. */
  public void reset() {
    foregroundColor = Color.WHITE;
    backgroundColor = Color.BLACK;
    bold = false;
    blink = false;
    reverse = false;
    underline = false;
  }

  /** Copies every attribute from {@code attributes} into this instance. */
  public void set(Attributes attributes) {
    foregroundColor = attributes.foregroundColor;
    backgroundColor = attributes.backgroundColor;
    bold = attributes.bold;
    blink = attributes.blink;
    reverse = attributes.reverse;
    underline = attributes.underline;
  }

  @Override
  public boolean equals(Object o) {
    if (o == this) {
      return true;
    }
    if (!(o instanceof Attributes)) {
      return false;
    }
    Attributes other = (Attributes) o;
    // Colors are enum constants, so reference equality is correct here.
    return bold == other.bold
      && blink == other.blink
      && reverse == other.reverse
      && underline == other.underline
      && foregroundColor == other.foregroundColor
      && backgroundColor == other.backgroundColor;
  }

  @Override
  public int hashCode() {
    return Objects.hash(bold, blink, reverse, underline, foregroundColor, backgroundColor);
  }
}

View File

@ -0,0 +1,28 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.terminal;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
 * Terminal color definitions.
 *
 * NOTE(review): the declaration order (BLACK through WHITE) appears to mirror the standard
 * ANSI terminal color codes 0-7 via {@code ordinal()} — confirm before reordering or
 * inserting constants.
 */
@InterfaceAudience.Private
public enum Color {
  BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE
}

View File

@ -0,0 +1,61 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.terminal;
import java.util.Objects;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
 * A 2-d position in 'terminal space'.
 *
 * An immutable (column, row) pair with value-based equality.
 */
@InterfaceAudience.Private
public class CursorPosition {

  private final int column;
  private final int row;

  public CursorPosition(int column, int row) {
    this.column = column;
    this.row = row;
  }

  public int getColumn() {
    return column;
  }

  public int getRow() {
    return row;
  }

  @Override
  public boolean equals(Object o) {
    if (o == this) {
      return true;
    }
    if (!(o instanceof CursorPosition)) {
      return false;
    }
    CursorPosition other = (CursorPosition) o;
    return this.column == other.column && this.row == other.row;
  }

  @Override
  public int hashCode() {
    return Objects.hash(column, row);
  }
}

View File

@ -0,0 +1,128 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.terminal;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.util.Objects;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
 * Represents the user pressing a key on the keyboard.
 *
 * Immutable: holds the key {@link Type}, the character (for {@code Type.Character} presses,
 * may be null otherwise) and the Alt/Ctrl/Shift modifier flags.
 */
@InterfaceAudience.Private
public class KeyPress {

  // NOTE: do not reorder these constants; keep the declared order stable.
  public enum Type {
    Character,
    Escape,
    Backspace,
    ArrowLeft,
    ArrowRight,
    ArrowUp,
    ArrowDown,
    Insert,
    Delete,
    Home,
    End,
    PageUp,
    PageDown,
    ReverseTab,
    Tab,
    Enter,
    F1,
    F2,
    F3,
    F4,
    F5,
    F6,
    F7,
    F8,
    F9,
    F10,
    F11,
    F12,
    Unknown
  }

  private final Type type;
  private final Character character;
  private final boolean alt;
  private final boolean ctrl;
  private final boolean shift;

  public KeyPress(Type type, @Nullable Character character, boolean alt, boolean ctrl,
    boolean shift) {
    this.type = Objects.requireNonNull(type);
    this.character = character;
    this.alt = alt;
    this.ctrl = ctrl;
    this.shift = shift;
  }

  public Type getType() {
    return type;
  }

  @Nullable
  public Character getCharacter() {
    return character;
  }

  public boolean isAlt() {
    return alt;
  }

  public boolean isCtrl() {
    return ctrl;
  }

  public boolean isShift() {
    return shift;
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder("KeyPress{");
    sb.append("type=").append(type);
    sb.append(", character=").append(escape(character));
    sb.append(", alt=").append(alt);
    sb.append(", ctrl=").append(ctrl);
    sb.append(", shift=").append(shift);
    sb.append('}');
    return sb.toString();
  }

  /** Renders control characters (\n, \b, \t) and null readably for {@link #toString()}. */
  private String escape(Character character) {
    if (character == null) {
      return "null";
    }
    char ch = character;
    if (ch == '\n') {
      return "\\n";
    }
    if (ch == '\b') {
      return "\\b";
    }
    if (ch == '\t') {
      return "\\t";
    }
    return character.toString();
  }
}

View File

@ -0,0 +1,39 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.terminal;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.io.Closeable;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
 * The terminal interface that is an abstraction of terminal screen.
 */
@InterfaceAudience.Private
public interface Terminal extends Closeable {

  /** Clears the whole screen buffer. */
  void clear();

  /** Writes the pending changes of the screen buffer out to the actual terminal. */
  void refresh();

  /** @return the current terminal size (columns x rows) */
  TerminalSize getSize();

  /**
   * Checks whether the terminal has been resized.
   *
   * @return the new terminal size if it changed, or {@code null} if unchanged
   */
  @Nullable TerminalSize doResizeIfNecessary();

  /**
   * Polls for a key press without blocking.
   *
   * @return the next pending {@link KeyPress}, or {@code null} if none is available
   */
  @Nullable KeyPress pollKeyPress();

  /** @return the current cursor position */
  CursorPosition getCursorPosition();

  /** Moves the cursor to the given position (column and row are zero-based). */
  void setCursorPosition(int column, int row);

  /** Hides the cursor. */
  void hideCursor();

  /** @return a {@link TerminalPrinter} that starts printing at the given row */
  TerminalPrinter getTerminalPrinter(int startRow);
}

View File

@ -0,0 +1,54 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.terminal;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
 * The interface responsible for printing to the terminal.
 *
 * All {@code print} methods return this printer so calls can be chained.
 */
@InterfaceAudience.Private
public interface TerminalPrinter {

  /** Prints the given string at the current position. */
  TerminalPrinter print(String value);

  /** Prints the given object at the current position. */
  TerminalPrinter print(Object value);

  /** Prints the given char at the current position. */
  TerminalPrinter print(char value);

  /** Prints the given short at the current position. */
  TerminalPrinter print(short value);

  /** Prints the given int at the current position. */
  TerminalPrinter print(int value);

  /** Prints the given long at the current position. */
  TerminalPrinter print(long value);

  /** Prints the given float at the current position. */
  TerminalPrinter print(float value);

  /** Prints the given double at the current position. */
  TerminalPrinter print(double value);

  /** Prints a formatted string (as in {@link String#format}) at the current position. */
  TerminalPrinter printFormat(String format, Object... args);

  /** Starts highlighted output; subsequent prints are highlighted until stopped. */
  TerminalPrinter startHighlight();

  /** Stops highlighted output. */
  TerminalPrinter stopHighlight();

  /** Starts bold output; subsequent prints are bold until stopped. */
  TerminalPrinter startBold();

  /** Stops bold output. */
  TerminalPrinter stopBold();

  /** Ends the current line and moves to the next row. */
  void endOfLine();
}

View File

@ -0,0 +1,61 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.terminal;
import java.util.Objects;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
* Terminal dimensions in 2-d space, measured in number of rows and columns.
*/
@InterfaceAudience.Private
public class TerminalSize {

  /** Number of columns (width) of the terminal. */
  private final int columns;

  /** Number of rows (height) of the terminal. */
  private final int rows;

  /**
   * Creates a terminal size.
   *
   * @param columns number of columns of the terminal
   * @param rows number of rows of the terminal
   */
  public TerminalSize(int columns, int rows) {
    this.columns = columns;
    this.rows = rows;
  }

  /** @return the number of columns */
  public int getColumns() {
    return columns;
  }

  /** @return the number of rows */
  public int getRows() {
    return rows;
  }

  @Override
  public boolean equals(Object o) {
    if (o == this) {
      return true;
    }
    if (!(o instanceof TerminalSize)) {
      return false;
    }
    TerminalSize other = (TerminalSize) o;
    return other.columns == columns && other.rows == rows;
  }

  @Override
  public int hashCode() {
    return Objects.hash(columns, rows);
  }
}

View File

@ -0,0 +1,122 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.terminal.impl;
import java.util.Objects;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.terminal.Attributes;
import org.apache.hadoop.hbase.hbtop.terminal.Color;
/**
* Represents a single text cell of the terminal.
*/
@InterfaceAudience.Private
public class Cell {

  // Sentinel character meaning "this cell's content is unknown", which forces
  // the diffing logic to repaint it.
  private static final char UNSET_VALUE = (char) 65535;

  // Sentinel character meaning "the rest of this line is blank".
  private static final char END_OF_LINE = '\0';

  /** Display attributes (colors, bold, etc.) of this cell. */
  private final Attributes attributes;

  /** The character shown in this cell, or one of the sentinel values above. */
  private char ch;

  /** Creates a blank cell (a space with default attributes). */
  public Cell() {
    this.attributes = new Attributes();
    this.ch = ' ';
  }

  /** @return the character of this cell */
  public char getChar() {
    return ch;
  }

  /** Sets the character of this cell. */
  public void setChar(char ch) {
    this.ch = ch;
  }

  /** Resets this cell to a blank (space with default attributes). */
  public void reset() {
    this.attributes.reset();
    this.ch = ' ';
  }

  /** Marks this cell's content as unknown so it will be repainted. */
  public void unset() {
    this.attributes.reset();
    this.ch = UNSET_VALUE;
  }

  /** Marks this cell as the end of the line. */
  public void endOfLine() {
    this.attributes.reset();
    this.ch = END_OF_LINE;
  }

  /** @return whether this cell marks the end of the line */
  public boolean isEndOfLine() {
    return ch == END_OF_LINE;
  }

  /** Copies the character and attributes of the given cell into this one. */
  public void set(Cell cell) {
    this.attributes.set(cell.attributes);
    this.ch = cell.ch;
  }

  /** @return a defensive copy of this cell's attributes */
  public Attributes getAttributes() {
    return new Attributes(attributes);
  }

  /** Copies the given attributes into this cell. */
  public void setAttributes(Attributes attributes) {
    this.attributes.set(attributes);
  }

  /** @return whether this cell is bold */
  public boolean isBold() {
    return attributes.isBold();
  }

  /** @return whether this cell blinks */
  public boolean isBlink() {
    return attributes.isBlink();
  }

  /** @return whether this cell is reverse video */
  public boolean isReverse() {
    return attributes.isReverse();
  }

  /** @return whether this cell is underlined */
  public boolean isUnderline() {
    return attributes.isUnderline();
  }

  /** @return the foreground color of this cell */
  public Color getForegroundColor() {
    return attributes.getForegroundColor();
  }

  /** @return the background color of this cell */
  public Color getBackgroundColor() {
    return attributes.getBackgroundColor();
  }

  @Override
  public boolean equals(Object o) {
    if (o == this) {
      return true;
    }
    if (!(o instanceof Cell)) {
      return false;
    }
    Cell other = (Cell) o;
    return ch == other.ch && attributes.equals(other.attributes);
  }

  @Override
  public int hashCode() {
    return Objects.hash(attributes, ch);
  }
}

View File

@ -0,0 +1,140 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.terminal.impl;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.terminal.Color;
/**
* Utility class for escape sequences.
*/
@InterfaceAudience.Private
public final class EscapeSequences {

  private EscapeSequences() {
  }

  /** @return sequence that resets attributes to white-on-black and clears the whole screen */
  public static String clearAll() {
    return "\033[0;37;40m\033[2J";
  }

  /** @return sequence that sets the terminal window title */
  public static String setTitle(String title) {
    return "\033]2;" + title + "\007";
  }

  /** @return sequence that shows ({@code on == true}) or hides the cursor */
  public static String cursor(boolean on) {
    return on ? "\033[?25h" : "\033[?25l";
  }

  /** @return sequence that moves the cursor to the given zero-based column/row */
  public static String moveCursor(int column, int row) {
    // The CUP escape sequence is 1-based, hence the +1 on both coordinates.
    return String.format("\033[%d;%dH", row + 1, column + 1);
  }

  /** @return sequence that resets attributes and clears from the cursor to end of line */
  public static String clearRemainingLine() {
    return "\033[0;37;40m\033[K";
  }

  /**
   * Builds an SGR escape sequence for the given colors and text attributes.
   *
   * The sequence always starts with a full reset ({@code 0}) and then enables,
   * in this fixed order, bold ({@code 1}), reverse ({@code 7}), blink ({@code 5})
   * and underline ({@code 4}), followed by the foreground and background colors.
   */
  public static String color(Color foregroundColor, Color backgroundColor, boolean bold,
    boolean reverse, boolean blink, boolean underline) {
    StringBuilder sb = new StringBuilder("\033[0;");
    if (bold) {
      sb.append("1;");
    }
    if (reverse) {
      sb.append("7;");
    }
    if (blink) {
      sb.append("5;");
    }
    if (underline) {
      sb.append("4;");
    }
    sb.append(String.format("%d;%dm", getColorValue(foregroundColor, true),
      getColorValue(backgroundColor, false)));
    return sb.toString();
  }

  /**
   * Maps a {@link Color} to its SGR numeric code: foreground colors are 30-37,
   * background colors are 40-47.
   */
  private static int getColorValue(Color color, boolean foreground) {
    int baseValue = foreground ? 30 : 40;
    switch (color) {
      case BLACK:
        return baseValue;
      case RED:
        return baseValue + 1;
      case GREEN:
        return baseValue + 2;
      case YELLOW:
        return baseValue + 3;
      case BLUE:
        return baseValue + 4;
      case MAGENTA:
        return baseValue + 5;
      case CYAN:
        return baseValue + 6;
      case WHITE:
        return baseValue + 7;
      default:
        throw new AssertionError();
    }
  }

  /** @return sequence that resets attributes to the default white-on-black */
  public static String normal() {
    return "\033[0;37;40m";
  }
}

View File

@ -0,0 +1,500 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.terminal.impl;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.nio.charset.StandardCharsets;
import java.util.Queue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.terminal.KeyPress;
import org.apache.hadoop.hbase.util.Threads;
/**
* This generates {@link KeyPress} objects from the given input stream and offers them to the
* given queue.
*/
@InterfaceAudience.Private
public class KeyPressGenerator {

  private static final Log LOG = LogFactory.getLog(KeyPressGenerator.class);

  // States of the escape-sequence parser: START is normal input, ESCAPE means
  // an ESC (0x1B) was just seen, and the two PARAM states parse the numeric
  // parameters of a sequence like "ESC [ param1 ; param2 <final-char>".
  private enum ParseState {
    START, ESCAPE, ESCAPE_SEQUENCE_PARAM1, ESCAPE_SEQUENCE_PARAM2
  }

  // Destination queue for parsed KeyPress objects (consumed by the terminal).
  private final Queue<KeyPress> keyPressQueue;

  // Raw characters handed from the reader thread to the generator thread.
  private final BlockingQueue<Character> inputCharacterQueue = new LinkedBlockingQueue<>();

  private final Reader input;
  private final InputStream inputStream;

  // Set to true to make both worker threads exit their loops.
  private final AtomicBoolean stopThreads = new AtomicBoolean();

  private final ExecutorService executorService;

  // Current parser state plus the two accumulated numeric parameters.
  private ParseState parseState;
  private int param1;
  private int param2;

  /**
   * Creates a generator that reads characters from the given input stream and
   * offers parsed {@link KeyPress} objects to the given queue.
   * Call {@link #start()} to begin the two worker threads.
   */
  public KeyPressGenerator(InputStream inputStream, Queue<KeyPress> keyPressQueue) {
    this.inputStream = inputStream;
    input = new InputStreamReader(inputStream, StandardCharsets.UTF_8);
    this.keyPressQueue = keyPressQueue;

    // Two daemon threads: one reads raw characters, the other parses them.
    executorService = Executors.newFixedThreadPool(2, new ThreadFactoryBuilder()
      .setNameFormat("KeyPressGenerator-%d").setDaemon(true)
      .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());

    initState();
  }

  /** Starts the reader and generator worker threads. */
  public void start() {
    executorService.execute(new Runnable() {
      @Override
      public void run() {
        readerThread();
      }
    });

    executorService.execute(new Runnable() {
      @Override
      public void run() {
        generatorThread();
      }
    });
  }

  // Resets the escape-sequence parser to its initial state.
  private void initState() {
    parseState = ParseState.START;
    param1 = 0;
    param2 = 0;
  }

  // Loop of the reader thread: polls the input stream for available bytes and
  // forwards each decoded character to inputCharacterQueue. Sleeps briefly when
  // no input is available to avoid busy-waiting.
  private void readerThread() {
    boolean done = false;
    char[] readBuffer = new char[128];

    while (!done && !stopThreads.get()) {
      try {
        int n = inputStream.available();

        if (n > 0) {
          // Grow the buffer when more bytes are available than fit.
          // NOTE(review): a single doubling may still be smaller than n, but
          // read() below is bounded by the buffer length, so the remainder is
          // simply picked up on the next loop iteration.
          if (readBuffer.length < n) {
            readBuffer = new char[readBuffer.length * 2];
          }

          int rc = input.read(readBuffer, 0, readBuffer.length);
          if (rc == -1) {
            // EOF
            done = true;
          } else {
            for (int i = 0; i < rc; i++) {
              int ch = readBuffer[i];
              inputCharacterQueue.offer((char) ch);
            }
          }
        } else {
          Thread.sleep(20);
        }
      } catch (InterruptedException ignored) {
      } catch (IOException e) {
        LOG.error("Caught an exception", e);
        done = true;
      }
    }
  }

  // Loop of the generator thread: consumes characters from inputCharacterQueue
  // and runs the escape-sequence state machine, offering KeyPress objects to
  // the destination queue.
  private void generatorThread() {
    while (!stopThreads.get()) {
      Character ch;
      try {
        // A 100ms timeout doubles as the "no more characters are coming" signal
        // used below to flush a pending lone ESC or incomplete sequence.
        ch = inputCharacterQueue.poll(100, TimeUnit.MILLISECONDS);
      } catch (InterruptedException ignored) {
        continue;
      }

      if (ch == null) {
        // Timed out: a bare ESC with no follow-up is an Escape key press; an
        // incomplete escape sequence is reported as Unknown.
        if (parseState == ParseState.ESCAPE) {
          offer(new KeyPress(KeyPress.Type.Escape, null, false, false, false));
          initState();
        } else if (parseState != ParseState.START) {
          offer(new KeyPress(KeyPress.Type.Unknown, null, false, false, false));
          initState();
        }
        continue;
      }

      if (parseState == ParseState.START) {
        if (ch == 0x1B) {
          parseState = ParseState.ESCAPE;
          continue;
        }

        // Dedicated control characters with their own key types.
        switch (ch) {
          case '\n':
          case '\r':
            offer(new KeyPress(KeyPress.Type.Enter, '\n', false, false, false));
            continue;

          case 0x08:
          case 0x7F:
            offer(new KeyPress(KeyPress.Type.Backspace, '\b', false, false, false));
            continue;

          case '\t':
            offer(new KeyPress(KeyPress.Type.Tab, '\t', false, false, false));
            continue;

          default:
            // Do nothing
            break;
        }

        // Remaining control characters (< 32) represent Ctrl + letter.
        if (ch < 32) {
          ctrlAndCharacter(ch);
          continue;
        }

        if (isPrintableChar(ch)) {
          // Normal character
          offer(new KeyPress(KeyPress.Type.Character, ch, false, false, false));
          continue;
        }

        offer(new KeyPress(KeyPress.Type.Unknown, null, false, false, false));
        continue;
      }

      if (parseState == ParseState.ESCAPE) {
        if (ch == 0x1B) {
          // Another ESC: report the previous one as an Escape key press.
          offer(new KeyPress(KeyPress.Type.Escape, null, false, false, false));
          continue;
        }

        if (ch < 32 && ch != 0x08) {
          // ESC followed by a control character: Ctrl + Alt + letter.
          ctrlAltAndCharacter(ch);
          initState();
          continue;
        } else if (ch == 0x7F || ch == 0x08) {
          offer(new KeyPress(KeyPress.Type.Backspace, '\b', false, false, false));
          initState();
          continue;
        }

        // CSI ("ESC [") or SS3 ("ESC O") introduces an escape sequence.
        if (ch == '[' || ch == 'O') {
          parseState = ParseState.ESCAPE_SEQUENCE_PARAM1;
          continue;
        }

        if (isPrintableChar(ch)) {
          // Alt and character
          offer(new KeyPress(KeyPress.Type.Character, ch, true, false, false));
          initState();
          continue;
        }

        offer(new KeyPress(KeyPress.Type.Escape, null, false, false, false));
        offer(new KeyPress(KeyPress.Type.Unknown, null, false, false, false));
        initState();
        continue;
      }

      escapeSequenceCharacter(ch);
    }
  }

  // Maps a raw control character (< 32) to "Ctrl + printable character".
  private void ctrlAndCharacter(char ch) {
    char ctrlCode;
    switch (ch) {
      case 0:
        ctrlCode = ' ';
        break;

      case 28:
        ctrlCode = '\\';
        break;

      case 29:
        ctrlCode = ']';
        break;

      case 30:
        ctrlCode = '^';
        break;

      case 31:
        ctrlCode = '_';
        break;

      default:
        // Ctrl+A (1) .. Ctrl+Z (26) map to 'a' .. 'z'.
        ctrlCode = (char) ('a' - 1 + ch);
        break;
    }

    offer(new KeyPress(KeyPress.Type.Character, ctrlCode, false, true, false));
  }

  // A character is printable if it is not an ISO control character and belongs
  // to a known Unicode block other than SPECIALS (which contains U+FFFD etc.).
  private boolean isPrintableChar(char ch) {
    if (Character.isISOControl(ch)) {
      return false;
    }
    Character.UnicodeBlock block = Character.UnicodeBlock.of(ch);
    return block != null && !block.equals(Character.UnicodeBlock.SPECIALS);
  }

  // Same mapping as ctrlAndCharacter, but with the Alt modifier also set
  // (reached via "ESC + control character").
  private void ctrlAltAndCharacter(char ch) {
    char ctrlCode;
    switch (ch) {
      case 0:
        ctrlCode = ' ';
        break;

      case 28:
        ctrlCode = '\\';
        break;

      case 29:
        ctrlCode = ']';
        break;

      case 30:
        ctrlCode = '^';
        break;

      case 31:
        ctrlCode = '_';
        break;

      default:
        ctrlCode = (char) ('a' - 1 + ch);
        break;
    }

    offer(new KeyPress(KeyPress.Type.Character, ctrlCode, true, true, false));
  }

  // Handles one character inside an escape sequence: digits accumulate into
  // param1/param2, ';' switches to the second parameter, and any other
  // character terminates the sequence.
  private void escapeSequenceCharacter(char ch) {
    switch (parseState) {
      case ESCAPE_SEQUENCE_PARAM1:
        if (ch == ';') {
          parseState = ParseState.ESCAPE_SEQUENCE_PARAM2;
        } else if (Character.isDigit(ch)) {
          param1 = param1 * 10 + Character.digit(ch, 10);
        } else {
          doneEscapeSequenceCharacter(ch);
        }
        break;

      case ESCAPE_SEQUENCE_PARAM2:
        if (Character.isDigit(ch)) {
          param2 = param2 * 10 + Character.digit(ch, 10);
        } else {
          doneEscapeSequenceCharacter(ch);
        }
        break;

      default:
        throw new AssertionError();
    }
  }

  /**
   * Called with the final character of an escape sequence; emits the matching
   * KeyPress. Letter finals (A-D arrows, H/F home/end, P-S F1-F4, Z reverse
   * tab) are keyed on the final character; a '~' final is keyed on param1
   * (the VT-style "ESC [ n ~" sequences).
   */
  private void doneEscapeSequenceCharacter(char last) {
    boolean alt = false;
    boolean ctrl = false;
    boolean shift = false;
    if (param2 != 0) {
      // param2, when present, encodes the modifiers as bit flags.
      alt = isAlt(param2);
      ctrl = isCtrl(param2);
      shift = isShift(param2);
    }

    if (last != '~') {
      switch (last) {
        case 'A':
          offer(new KeyPress(KeyPress.Type.ArrowUp, null, alt, ctrl, shift));
          break;

        case 'B':
          offer(new KeyPress(KeyPress.Type.ArrowDown, null, alt, ctrl, shift));
          break;

        case 'C':
          offer(new KeyPress(KeyPress.Type.ArrowRight, null, alt, ctrl, shift));
          break;

        case 'D':
          offer(new KeyPress(KeyPress.Type.ArrowLeft, null, alt, ctrl, shift));
          break;

        case 'H':
          offer(new KeyPress(KeyPress.Type.Home, null, alt, ctrl, shift));
          break;

        case 'F':
          offer(new KeyPress(KeyPress.Type.End, null, alt, ctrl, shift));
          break;

        case 'P':
          offer(new KeyPress(KeyPress.Type.F1, null, alt, ctrl, shift));
          break;

        case 'Q':
          offer(new KeyPress(KeyPress.Type.F2, null, alt, ctrl, shift));
          break;

        case 'R':
          offer(new KeyPress(KeyPress.Type.F3, null, alt, ctrl, shift));
          break;

        case 'S':
          offer(new KeyPress(KeyPress.Type.F4, null, alt, ctrl, shift));
          break;

        case 'Z':
          offer(new KeyPress(KeyPress.Type.ReverseTab, null, alt, ctrl, shift));
          break;

        default:
          offer(new KeyPress(KeyPress.Type.Unknown, null, alt, ctrl, shift));
          break;
      }
      initState();
      return;
    }

    switch (param1) {
      case 1:
        offer(new KeyPress(KeyPress.Type.Home, null, alt, ctrl, shift));
        break;

      case 2:
        offer(new KeyPress(KeyPress.Type.Insert, null, alt, ctrl, shift));
        break;

      case 3:
        offer(new KeyPress(KeyPress.Type.Delete, null, alt, ctrl, shift));
        break;

      case 4:
        offer(new KeyPress(KeyPress.Type.End, null, alt, ctrl, shift));
        break;

      case 5:
        offer(new KeyPress(KeyPress.Type.PageUp, null, alt, ctrl, shift));
        break;

      case 6:
        offer(new KeyPress(KeyPress.Type.PageDown, null, alt, ctrl, shift));
        break;

      case 11:
        offer(new KeyPress(KeyPress.Type.F1, null, alt, ctrl, shift));
        break;

      case 12:
        offer(new KeyPress(KeyPress.Type.F2, null, alt, ctrl, shift));
        break;

      case 13:
        offer(new KeyPress(KeyPress.Type.F3, null, alt, ctrl, shift));
        break;

      case 14:
        offer(new KeyPress(KeyPress.Type.F4, null, alt, ctrl, shift));
        break;

      case 15:
        offer(new KeyPress(KeyPress.Type.F5, null, alt, ctrl, shift));
        break;

      case 17:
        offer(new KeyPress(KeyPress.Type.F6, null, alt, ctrl, shift));
        break;

      case 18:
        offer(new KeyPress(KeyPress.Type.F7, null, alt, ctrl, shift));
        break;

      case 19:
        offer(new KeyPress(KeyPress.Type.F8, null, alt, ctrl, shift));
        break;

      case 20:
        offer(new KeyPress(KeyPress.Type.F9, null, alt, ctrl, shift));
        break;

      case 21:
        offer(new KeyPress(KeyPress.Type.F10, null, alt, ctrl, shift));
        break;

      case 23:
        offer(new KeyPress(KeyPress.Type.F11, null, alt, ctrl, shift));
        break;

      case 24:
        offer(new KeyPress(KeyPress.Type.F12, null, alt, ctrl, shift));
        break;

      default:
        offer(new KeyPress(KeyPress.Type.Unknown, null, false, false, false));
        break;
    }
    initState();
  }

  // Modifier bit flags carried in the second escape-sequence parameter.
  private boolean isShift(int param) {
    return (param & 1) != 0;
  }

  private boolean isAlt(int param) {
    return (param & 2) != 0;
  }

  private boolean isCtrl(int param) {
    return (param & 4) != 0;
  }

  // Offers a KeyPress to the destination queue. Ctrl+C terminates the whole
  // process immediately (the terminal is in raw mode, so SIGINT is not
  // generated by the tty driver).
  private void offer(KeyPress keyPress) {
    // Handle ctrl + c
    if (keyPress.isCtrl() && keyPress.getType() == KeyPress.Type.Character &&
      keyPress.getCharacter() == 'c') {
      System.exit(0);
    }

    keyPressQueue.offer(keyPress);
  }

  /** Signals both worker threads to stop and waits for them to terminate. */
  public void stop() {
    stopThreads.set(true);

    executorService.shutdown();
    try {
      while (!executorService.awaitTermination(60, TimeUnit.SECONDS)) {
        LOG.warn("Waiting for thread-pool to terminate");
      }
    } catch (InterruptedException e) {
      LOG.warn("Interrupted while waiting for thread-pool termination", e);
    }
  }
}

View File

@ -0,0 +1,170 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.terminal.impl;
import static org.apache.hadoop.hbase.hbtop.terminal.impl.EscapeSequences.clearRemainingLine;
import static org.apache.hadoop.hbase.hbtop.terminal.impl.EscapeSequences.color;
import static org.apache.hadoop.hbase.hbtop.terminal.impl.EscapeSequences.cursor;
import static org.apache.hadoop.hbase.hbtop.terminal.impl.EscapeSequences.moveCursor;
import static org.apache.hadoop.hbase.hbtop.terminal.impl.EscapeSequences.normal;
import java.io.PrintWriter;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.terminal.Attributes;
import org.apache.hadoop.hbase.hbtop.terminal.CursorPosition;
/**
* Represents a buffer of the terminal screen for double-buffering.
*/
@InterfaceAudience.Private
public class ScreenBuffer {
  private int columns;
  private int rows;

  // Back buffer the application draws into, indexed as [column][row].
  private Cell[][] buffer;
  // Believed current contents of the physical terminal; flush() diffs buffer
  // against this so that only changed cells are written out.
  private Cell[][] physical;

  // Cursor state requested by the application; applied during flush().
  private boolean cursorVisible;
  private int cursorColumn;
  private int cursorRow;

  /**
   * (Re)allocates both buffers for the given dimensions. The physical buffer
   * is marked "unset" so that the next flush repaints every cell.
   */
  public void reallocate(int columns, int rows) {
    buffer = new Cell[columns][rows];
    physical = new Cell[columns][rows];

    for (int row = 0; row < rows; row++) {
      for (int column = 0; column < columns; column++) {
        buffer[column][row] = new Cell();

        physical[column][row] = new Cell();
        physical[column][row].unset();
      }
    }

    this.columns = columns;
    this.rows = rows;
  }

  /** Resets every cell of the back buffer to a blank. */
  public void clear() {
    for (int row = 0; row < rows; row++) {
      for (int col = 0; col < columns; col++) {
        buffer[col][row].reset();
      }
    }
  }

  /**
   * Writes the difference between the back buffer and the physical buffer to
   * the given output, then updates the cursor visibility/position.
   */
  public void flush(PrintWriter output) {
    StringBuilder sb = new StringBuilder();
    sb.append(normal());
    // Tracks the attributes last emitted, so color escapes are only written
    // when the attributes actually change between cells.
    Attributes attributes = new Attributes();

    for (int row = 0; row < rows; row++) {
      flushRow(row, sb, attributes);
    }

    if (cursorVisible && cursorRow >= 0 && cursorColumn >= 0 && cursorRow < rows &&
      cursorColumn < columns) {
      sb.append(cursor(true));
      sb.append(moveCursor(cursorColumn, cursorRow));
    } else {
      sb.append(cursor(false));
    }

    output.write(sb.toString());
    output.flush();
  }

  // Emits the changed cells of one row. A cursor-move escape is only emitted
  // when the cell to write does not immediately follow the previously written
  // one. An end-of-line cell short-circuits the rest of the row with a single
  // "clear to end of line" escape.
  private void flushRow(int row, StringBuilder sb, Attributes lastAttributes) {
    int lastColumn = -1;
    for (int column = 0; column < columns; column++) {
      Cell cell = buffer[column][row];
      Cell pCell = physical[column][row];

      if (!cell.equals(pCell)) {
        // The "lastColumn == -1" clause forces a move for the first changed
        // cell of the row, including column 0 where column - 1 is also -1.
        if (lastColumn != column - 1 || lastColumn == -1) {
          sb.append(moveCursor(column, row));
        }

        if (cell.isEndOfLine()) {
          // Sync the physical buffer for the remainder of the row before
          // clearing it on screen.
          for (int i = column; i < columns; i++) {
            physical[i][row].set(buffer[i][row]);
          }

          sb.append(clearRemainingLine());
          // clearRemainingLine() resets the terminal attributes.
          lastAttributes.reset();
          return;
        }

        if (!cell.getAttributes().equals(lastAttributes)) {
          sb.append(color(cell.getForegroundColor(), cell.getBackgroundColor(), cell.isBold(),
            cell.isReverse(), cell.isBlink(), cell.isUnderline()));
        }

        sb.append(cell.getChar());

        lastColumn = column;
        lastAttributes.set(cell.getAttributes());

        physical[column][row].set(cell);
      }
    }
  }

  /** @return the cursor position requested by the application */
  public CursorPosition getCursorPosition() {
    return new CursorPosition(cursorColumn, cursorRow);
  }

  /** Moves the cursor to the given position and makes it visible. */
  public void setCursorPosition(int column, int row) {
    cursorVisible = true;
    cursorColumn = column;
    cursorRow = row;
  }

  /** Hides the cursor (takes effect on the next flush). */
  public void hideCursor() {
    cursorVisible = false;
  }

  /**
   * Writes a string into the back buffer starting at the given position,
   * truncating at the right edge of the screen.
   */
  public void putString(int column, int row, String string, Attributes attributes) {
    int i = column;
    for (int j = 0; j < string.length(); j++) {
      char ch = string.charAt(j);
      putChar(i, row, ch, attributes);
      i += 1;
      if (i == columns) {
        break;
      }
    }
  }

  /** Writes one character into the back buffer; out-of-bounds writes are ignored. */
  public void putChar(int column, int row, char ch, Attributes attributes) {
    if (column >= 0 && column < columns && row >= 0 && row < rows) {
      buffer[column][row].setAttributes(attributes);
      buffer[column][row].setChar(ch);
    }
  }

  /**
   * Marks the end of a line at the given position and blanks the rest of the
   * row in the back buffer; out-of-bounds calls are ignored.
   */
  public void endOfLine(int column, int row) {
    if (column >= 0 && column < columns && row >= 0 && row < rows) {
      buffer[column][row].endOfLine();
      for (int i = column + 1; i < columns; i++) {
        buffer[i][row].reset();
      }
    }
  }
}

View File

@ -0,0 +1,229 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.terminal.impl;
import static org.apache.hadoop.hbase.hbtop.terminal.impl.EscapeSequences.clearAll;
import static org.apache.hadoop.hbase.hbtop.terminal.impl.EscapeSequences.cursor;
import static org.apache.hadoop.hbase.hbtop.terminal.impl.EscapeSequences.moveCursor;
import static org.apache.hadoop.hbase.hbtop.terminal.impl.EscapeSequences.normal;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.nio.charset.StandardCharsets;
import java.util.Queue;
import java.util.StringTokenizer;
import java.util.concurrent.ConcurrentLinkedQueue;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.terminal.CursorPosition;
import org.apache.hadoop.hbase.hbtop.terminal.KeyPress;
import org.apache.hadoop.hbase.hbtop.terminal.Terminal;
import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter;
import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize;
/**
* The implementation of the {@link Terminal} interface.
*/
@InterfaceAudience.Private
public class TerminalImpl implements Terminal {
private static final Log LOG = LogFactory.getLog(TerminalImpl.class);
private TerminalSize cachedTerminalSize;
private final PrintWriter output;
private final ScreenBuffer screenBuffer;
private final Queue<KeyPress> keyPressQueue;
private final KeyPressGenerator keyPressGenerator;
public TerminalImpl() {
this(null);
}
public TerminalImpl(@Nullable String title) {
output = new PrintWriter(new OutputStreamWriter(System.out, StandardCharsets.UTF_8));
sttyRaw();
if (title != null) {
setTitle(title);
}
screenBuffer = new ScreenBuffer();
cachedTerminalSize = queryTerminalSize();
updateTerminalSize(cachedTerminalSize.getColumns(), cachedTerminalSize.getRows());
keyPressQueue = new ConcurrentLinkedQueue<>();
keyPressGenerator = new KeyPressGenerator(System.in, keyPressQueue);
keyPressGenerator.start();
Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
@Override
public void run() {
output.printf("%s%s%s%s", moveCursor(0, 0), cursor(true), normal(), clearAll());
output.flush();
sttyCooked();
}
}));
// Clear the terminal
output.write(clearAll());
output.flush();
}
private void setTitle(String title) {
output.write(EscapeSequences.setTitle(title));
output.flush();
}
private void updateTerminalSize(int columns, int rows) {
screenBuffer.reallocate(columns, rows);
}
@Override
public void clear() {
screenBuffer.clear();
}
@Override
public void refresh() {
screenBuffer.flush(output);
}
@Override
public TerminalSize getSize() {
return cachedTerminalSize;
}
@Nullable
@Override
public TerminalSize doResizeIfNecessary() {
TerminalSize currentTerminalSize = queryTerminalSize();
if (!currentTerminalSize.equals(cachedTerminalSize)) {
cachedTerminalSize = currentTerminalSize;
updateTerminalSize(cachedTerminalSize.getColumns(), cachedTerminalSize.getRows());
return cachedTerminalSize;
}
return null;
}
@Nullable
@Override
public KeyPress pollKeyPress() {
return keyPressQueue.poll();
}
@Override
public CursorPosition getCursorPosition() {
return screenBuffer.getCursorPosition();
}
@Override
public void setCursorPosition(int column, int row) {
screenBuffer.setCursorPosition(column, row);
}
@Override
public void hideCursor() {
screenBuffer.hideCursor();
}
@Override
public TerminalPrinter getTerminalPrinter(int startRow) {
return new TerminalPrinterImpl(screenBuffer, startRow);
}
@Override
public void close() {
keyPressGenerator.stop();
}
private TerminalSize queryTerminalSize() {
String sizeString = doStty("size");
int rows = 0;
int columns = 0;
StringTokenizer tokenizer = new StringTokenizer(sizeString);
int rc = Integer.parseInt(tokenizer.nextToken());
if (rc > 0) {
rows = rc;
}
rc = Integer.parseInt(tokenizer.nextToken());
if (rc > 0) {
columns = rc;
}
return new TerminalSize(columns, rows);
}
private void sttyRaw() {
doStty("-ignbrk -brkint -parmrk -istrip -inlcr -igncr -icrnl -ixon -opost " +
"-echo -echonl -icanon -isig -iexten -parenb cs8 min 1");
}
private void sttyCooked() {
doStty("sane cooked");
}
/**
 * Runs {@code stty} with the given options against the controlling terminal
 * ({@code /dev/tty}) via a shell.
 *
 * @param sttyOptionsString the options passed verbatim to stty
 * @return the first line of stty's stdout, or {@code null} if stty printed
 *   nothing
 * @throws RuntimeException if the process could not be started or its
 *   streams could not be read
 */
private String doStty(String sttyOptionsString) {
  String [] cmd = {"/bin/sh", "-c", "stty " + sttyOptionsString + " < /dev/tty"};
  try {
    Process process = Runtime.getRuntime().exec(cmd);
    String ret;
    // stdout: stty prints its result (e.g. "rows cols" for "stty size") here.
    try (BufferedReader stdout = new BufferedReader(new InputStreamReader(
      process.getInputStream(), StandardCharsets.UTF_8))) {
      ret = stdout.readLine();
    }
    // stderr: only logged; a message here usually means stty rejected an option.
    try (BufferedReader stderr = new BufferedReader(new InputStreamReader(
      process.getErrorStream(), StandardCharsets.UTF_8))) {
      String line = stderr.readLine();
      if ((line != null) && (line.length() > 0)) {
        LOG.error("Error output from stty: " + line);
      }
    }
    try {
      process.waitFor();
    } catch (InterruptedException e) {
      // Don't swallow the interrupt: restore the thread's interrupt status
      // so callers further up the stack can observe it.
      Thread.currentThread().interrupt();
    }
    int exitValue = process.exitValue();
    if (exitValue != 0) {
      LOG.error("stty returned error code: " + exitValue);
    }
    return ret;
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
}

View File

@ -0,0 +1,83 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.terminal.impl;
import java.util.Objects;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.hbtop.terminal.AbstractTerminalPrinter;
import org.apache.hadoop.hbase.hbtop.terminal.Attributes;
import org.apache.hadoop.hbase.hbtop.terminal.Color;
import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter;
/**
 * The implementation of the {@link TerminalPrinter} interface.
 *
 * Writes strings into a {@link ScreenBuffer} while tracking the current
 * column/row position and the text {@link Attributes} to apply.
 */
@InterfaceAudience.Private
public class TerminalPrinterImpl extends AbstractTerminalPrinter {
// Target buffer all output is written into.
private final ScreenBuffer screenBuffer;
// Current write position within the buffer.
private int row;
private int column;
// Mutable attributes applied to text printed from now on.
private final Attributes attributes = new Attributes();
TerminalPrinterImpl(ScreenBuffer screenBuffer, int startRow) {
this.screenBuffer = Objects.requireNonNull(screenBuffer);
this.row = startRow;
}
// Prints the value at the current position and advances the column by its length.
@Override
public TerminalPrinter print(String value) {
screenBuffer.putString(column, row, value, attributes);
column += value.length();
return this;
}
// Highlighting is rendered as inverted colors: black text on a white background.
@Override
public TerminalPrinter startHighlight() {
attributes.setForegroundColor(Color.BLACK);
attributes.setBackgroundColor(Color.WHITE);
return this;
}
// Restores the default colors: white text on a black background.
@Override
public TerminalPrinter stopHighlight() {
attributes.setForegroundColor(Color.WHITE);
attributes.setBackgroundColor(Color.BLACK);
return this;
}
@Override
public TerminalPrinter startBold() {
attributes.setBold(true);
return this;
}
@Override
public TerminalPrinter stopBold() {
attributes.setBold(false);
return this;
}
// Terminates the current line in the buffer and moves to the start of the next row.
@Override
public void endOfLine() {
screenBuffer.endOfLine(column, row);
row += 1;
column = 0;
}
}

View File

@ -0,0 +1,87 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop;
import static org.apache.hadoop.hbase.hbtop.Record.entry;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
 * Test for {@link Record}.
 */
@Category(SmallTests.class)
public class TestRecord {

  @Test
  public void testBuilder() {
    Record original = Record.builder()
      .put(Field.TABLE, "tableName")
      .put(entry(Field.REGION_COUNT, 3))
      .put(Field.REQUEST_COUNT_PER_SECOND, Field.REQUEST_COUNT_PER_SECOND.newValue(100L))
      .build();
    assertTestRecord(original);

    // A record built from putAll() must carry the same entries.
    Record copied = Record.builder().putAll(original).build();
    assertTestRecord(copied);
  }

  @Test
  public void testOfEntries() {
    Record record = Record.ofEntries(
      entry(Field.TABLE, "tableName"),
      entry(Field.REGION_COUNT, 3),
      entry(Field.REQUEST_COUNT_PER_SECOND, 100L));
    assertTestRecord(record);
  }

  // Shared assertions for a record holding the canonical test entries.
  private void assertTestRecord(Record record) {
    assertThat(record.size(), is(3));
    assertThat(record.get(Field.TABLE).asString(), is("tableName"));
    assertThat(record.get(Field.REGION_COUNT).asInt(), is(3));
    assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), is(100L));
  }

  @Test
  public void testCombine() {
    Record record1 = Record.ofEntries(
      entry(Field.TABLE, "tableName"),
      entry(Field.REGION_COUNT, 3),
      entry(Field.REQUEST_COUNT_PER_SECOND, 100L));

    Record record2 = Record.ofEntries(
      entry(Field.TABLE, "tableName"),
      entry(Field.REGION_COUNT, 5),
      entry(Field.REQUEST_COUNT_PER_SECOND, 500L));

    // combine() sums the numeric fields while keeping the shared table name.
    Record combined = record1.combine(record2);
    assertThat(combined.size(), is(3));
    assertThat(combined.get(Field.TABLE).asString(), is("tableName"));
    assertThat(combined.get(Field.REGION_COUNT).asInt(), is(8));
    assertThat(combined.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), is(600L));
  }
}

View File

@ -0,0 +1,209 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.junit.Assert.assertThat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.hbtop.field.Size;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
 * Test for {@link RecordFilter}: parsing, string round-trip and evaluation.
 */
@Category(SmallTests.class)
public class TestRecordFilter {

  @Test
  public void testParseAndBuilder() {
    testParseAndBuilder("REGION=region1", false,
      RecordFilter.newBuilder(Field.REGION).equal("region1"));
    testParseAndBuilder("REGION=", false,
      RecordFilter.newBuilder(Field.REGION).equal(""));
    testParseAndBuilder("!REGION=region1", false,
      RecordFilter.newBuilder(Field.REGION).notEqual("region1"));
    testParseAndBuilder("REGION==region2", true,
      RecordFilter.newBuilder(Field.REGION, true).doubleEquals("region2"));
    testParseAndBuilder("!REGION==region2", true,
      RecordFilter.newBuilder(Field.REGION, true).notDoubleEquals("region2"));
    testParseAndBuilder("#REQ/S>100", false,
      RecordFilter.newBuilder(Field.REQUEST_COUNT_PER_SECOND).greater(100L));
    testParseAndBuilder("!#REQ/S>100", false,
      RecordFilter.newBuilder(Field.REQUEST_COUNT_PER_SECOND).notGreater(100L));
    testParseAndBuilder("SF>=50MB", true,
      RecordFilter.newBuilder(Field.STORE_FILE_SIZE, true).greaterOrEqual("50MB"));
    testParseAndBuilder("!SF>=50MB", true,
      RecordFilter.newBuilder(Field.STORE_FILE_SIZE, true).notGreaterOrEqual("50MB"));
    testParseAndBuilder("#REQ/S<20", false,
      RecordFilter.newBuilder(Field.REQUEST_COUNT_PER_SECOND).less(20L));
    testParseAndBuilder("!#REQ/S<20", false,
      RecordFilter.newBuilder(Field.REQUEST_COUNT_PER_SECOND).notLess(20L));
    testParseAndBuilder("%COMP<=50%", true,
      RecordFilter.newBuilder(Field.COMPACTION_PROGRESS, true).lessOrEqual("50%"));
    testParseAndBuilder("!%COMP<=50%", true,
      RecordFilter.newBuilder(Field.COMPACTION_PROGRESS, true).notLessOrEqual("50%"));
  }

  /**
   * Parses the filter string and checks it equals the filter built through the
   * builder API.
   */
  private void testParseAndBuilder(String filterString, boolean ignoreCase,
      RecordFilter expected) {
    RecordFilter actual = RecordFilter.parse(filterString, ignoreCase);
    // Actual value first, expected second, so assertion failure messages
    // report the values in the right roles (the original had them reversed).
    assertThat(actual, is(expected));
  }

  @Test
  public void testParseFailure() {
    // Unknown field name.
    RecordFilter filter = RecordFilter.parse("REGIO=region1", false);
    assertThat(filter, is(nullValue()));

    // Empty filter string.
    filter = RecordFilter.parse("", false);
    assertThat(filter, is(nullValue()));

    // Non-numeric value for a numeric field.
    filter = RecordFilter.parse("#REQ/S==aaa", false);
    assertThat(filter, is(nullValue()));

    // Size value without a unit suffix.
    filter = RecordFilter.parse("SF>=50", false);
    assertThat(filter, is(nullValue()));
  }

  @Test
  public void testToString() {
    testToString("REGION=region1");
    testToString("!REGION=region1");
    testToString("REGION==region2");
    testToString("!REGION==region2");
    testToString("#REQ/S>100");
    testToString("!#REQ/S>100");
    testToString("SF>=50.0MB");
    testToString("!SF>=50.0MB");
    testToString("#REQ/S<20");
    testToString("!#REQ/S<20");
    testToString("%COMP<=50.00%");
    testToString("!%COMP<=50.00%");
  }

  /** Verifies that parse() followed by toString() round-trips the string. */
  private void testToString(String filterString) {
    RecordFilter filter = RecordFilter.parse(filterString, false);
    assertThat(filter, is(notNullValue()));
    // Actual (round-tripped string) first, expected (original) second.
    assertThat(filter.toString(), is(filterString));
  }

  @Test
  public void testFilters() {
    List<Record> records = createTestRecords();

    testFilter(records, "REGION=region", false,
      "region1", "region2", "region3", "region4", "region5");
    testFilter(records, "!REGION=region", false);
    testFilter(records, "REGION=Region", false);
    testFilter(records, "REGION==region", false);
    testFilter(records, "REGION==region1", false, "region1");
    testFilter(records, "!REGION==region1", false, "region2", "region3", "region4", "region5");
    testFilter(records, "#REQ/S==100", false, "region1");
    testFilter(records, "#REQ/S>100", false, "region2", "region5");
    testFilter(records, "SF>=100MB", false, "region1", "region2", "region4", "region5");
    testFilter(records, "!#SF>=10", false, "region1", "region4");
    testFilter(records, "LOCALITY<0.5", false, "region5");
    testFilter(records, "%COMP<=50%", false, "region2", "region3", "region4", "region5");

    // Multiple filters are AND-combined.
    testFilters(records, Arrays.asList("SF>=100MB", "#REQ/S>100"), false,
      "region2", "region5");
    testFilters(records, Arrays.asList("%COMP<=50%", "!#SF>=10"), false, "region4");
    testFilters(records, Arrays.asList("!REGION==region1", "LOCALITY<0.5", "#REQ/S>100"), false,
      "region5");
  }

  @Test
  public void testFiltersIgnoreCase() {
    List<Record> records = createTestRecords();
    testFilter(records, "REGION=Region", true,
      "region1", "region2", "region3", "region4", "region5");
    testFilter(records, "REGION=REGION", true,
      "region1", "region2", "region3", "region4", "region5");
  }

  // Five fixed records covering distinct value ranges for every filtered field.
  private List<Record> createTestRecords() {
    List<Record> ret = new ArrayList<>();
    ret.add(createTestRecord("region1", 100L, new Size(100, Size.Unit.MEGABYTE), 2, 1.0f, 80f));
    ret.add(createTestRecord("region2", 120L, new Size(100, Size.Unit.GIGABYTE), 10, 0.5f, 20f));
    ret.add(createTestRecord("region3", 50L, new Size(500, Size.Unit.KILOBYTE), 15, 0.8f, 50f));
    ret.add(createTestRecord("region4", 90L, new Size(10, Size.Unit.TERABYTE), 5, 0.9f, 30f));
    ret.add(createTestRecord("region5", 200L, new Size(1, Size.Unit.PETABYTE), 13, 0.1f, 40f));
    return ret;
  }

  private Record createTestRecord(String region, long requestCountPerSecond,
      Size storeFileSize, int numStoreFiles, float locality, float compactionProgress) {
    Record.Builder builder = Record.builder();
    builder.put(Field.REGION, region);
    builder.put(Field.REQUEST_COUNT_PER_SECOND, requestCountPerSecond);
    builder.put(Field.STORE_FILE_SIZE, storeFileSize);
    builder.put(Field.NUM_STORE_FILES, numStoreFiles);
    builder.put(Field.LOCALITY, locality);
    builder.put(Field.COMPACTION_PROGRESS, compactionProgress);
    return builder.build();
  }

  private void testFilter(List<Record> records, String filterString, boolean ignoreCase,
      String... expectedRegions) {
    testFilters(records, Collections.singletonList(filterString), ignoreCase, expectedRegions);
  }

  /**
   * Applies all filters (AND semantics) to the records and checks the surviving
   * region names against the expectation, in order.
   */
  private void testFilters(List<Record> records, List<String> filterStrings, boolean ignoreCase,
      String... expectedRegions) {
    List<String> actual = new ArrayList<>();
    for (Record record : records) {
      boolean filter = false;
      for (String filterString : filterStrings) {
        if (!RecordFilter.parse(filterString, ignoreCase).execute(record)) {
          filter = true;
        }
      }
      if (!filter) {
        actual.add(record.get(Field.REGION).asString());
      }
    }
    assertThat(actual.size(), is(expectedRegions.length));
    for (int i = 0; i < actual.size(); i++) {
      assertThat(actual.get(i), is(expectedRegions[i]));
    }
  }
}

View File

@ -0,0 +1,373 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.protobuf.ByteString;
import java.text.ParseException;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.lang3.time.FastDateFormat;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.hbtop.field.Size;
import org.apache.hadoop.hbase.hbtop.field.Size.Unit;
import org.apache.hadoop.hbase.hbtop.screen.top.Summary;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
public final class TestUtils {
static final String HBASE_VERSION = "1.5.0-SNAPSHOT";
static final String CLUSTER_UUID = "01234567-89ab-cdef-0123-456789abcdef";
private TestUtils() { }
public static ClusterStatus createDummyClusterStatus() {
Map<ServerName, ServerLoad> serverLoads = Maps.newHashMap();
List<ServerName> deadServers = Lists.newArrayList();
Set<RegionState> rit = Sets.newHashSet();
ServerName host1 = ServerName.valueOf("host1.apache.com", 1000, 1);
serverLoads.put(host1,
createServerLoad(100,
new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 100,
Lists.newArrayList(
createRegionLoad("table1,,1.00000000000000000000000000000000.", 100, 100,
new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1,
new Size(100, Size.Unit.MEGABYTE), 0.1f, 100, 100, "2019-07-22 00:00:00"),
createRegionLoad("table2,1,2.00000000000000000000000000000001.", 200, 200,
new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2,
new Size(200, Size.Unit.MEGABYTE), 0.2f, 50, 200, "2019-07-22 00:00:01"),
createRegionLoad(
"namespace:table3,,3_0001.00000000000000000000000000000002.", 300, 300,
new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3,
new Size(300, Size.Unit.MEGABYTE), 0.3f, 100, 300, "2019-07-22 00:00:02"))));
ServerName host2 = ServerName.valueOf("host2.apache.com", 1001, 2);
serverLoads.put(host2,
createServerLoad(200,
new Size(16, Size.Unit.GIGABYTE), new Size(32, Size.Unit.GIGABYTE), 200,
Lists.newArrayList(
createRegionLoad("table1,1,4.00000000000000000000000000000003.", 100, 100,
new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1,
new Size(100, Size.Unit.MEGABYTE), 0.4f, 50, 100, "2019-07-22 00:00:03"),
createRegionLoad("table2,,5.00000000000000000000000000000004.", 200, 200,
new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2,
new Size(200, Size.Unit.MEGABYTE), 0.5f, 150, 200, "2019-07-22 00:00:04"),
createRegionLoad("namespace:table3,,6.00000000000000000000000000000005.", 300, 300,
new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3,
new Size(300, Size.Unit.MEGABYTE), 0.6f, 200, 300, "2019-07-22 00:00:05"))));
ServerName host3 = ServerName.valueOf("host3.apache.com", 1002, 3);
deadServers.add(host3);
rit.add(new RegionState(new HRegionInfo(0, TableName.valueOf("table4"), 0),
RegionState.State.OFFLINE, host3));
return new ClusterStatus(HBASE_VERSION, CLUSTER_UUID, serverLoads, deadServers, null, null,
rit, new String[0], true);
}
private static ClusterStatusProtos.RegionLoad createRegionLoad(String regionName,
long readRequestCount, long writeRequestCount, Size storeFileSize,
Size uncompressedStoreFileSize, int storeFileCount, Size memStoreSize, float locality,
long compactedCellCount, long compactingCellCount, String lastMajorCompactionTime) {
FastDateFormat df = FastDateFormat.getInstance("yyyy-MM-dd HH:mm:ss");
try {
return ClusterStatusProtos.RegionLoad.newBuilder()
.setRegionSpecifier(HBaseProtos.RegionSpecifier.newBuilder()
.setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
.setValue(ByteString.copyFromUtf8(regionName)).build())
.setReadRequestsCount(readRequestCount)
.setWriteRequestsCount(writeRequestCount)
.setStorefileSizeMB((int)storeFileSize.get(Unit.MEGABYTE))
.setStoreUncompressedSizeMB((int)uncompressedStoreFileSize.get(Unit.MEGABYTE))
.setStorefiles(storeFileCount)
.setMemstoreSizeMB((int)memStoreSize.get(Unit.MEGABYTE))
.setDataLocality(locality)
.setCurrentCompactedKVs(compactedCellCount)
.setTotalCompactingKVs(compactingCellCount)
.setLastMajorCompactionTs(df.parse(lastMajorCompactionTime).getTime())
.build();
} catch (ParseException e) {
throw new IllegalArgumentException(e);
}
}
private static ServerLoad createServerLoad(long reportTimestamp,
Size usedHeapSize, Size maxHeapSize, long requestCountPerSecond,
List<ClusterStatusProtos.RegionLoad> regionLoads) {
return new ServerLoad(ClusterStatusProtos.ServerLoad.newBuilder()
.setReportStartTime(reportTimestamp)
.setReportEndTime(reportTimestamp)
.setUsedHeapMB((int)usedHeapSize.get(Unit.MEGABYTE))
.setMaxHeapMB((int)maxHeapSize.get(Unit.MEGABYTE))
.setNumberOfRequests(requestCountPerSecond)
.addAllRegionLoads(regionLoads)
.build());
}
public static void assertRecordsInRegionMode(List<Record> records) {
assertEquals(6, records.size());
for (Record record : records) {
switch (record.get(Field.REGION_NAME).asString()) {
case "table1,,1.00000000000000000000000000000000.":
assertRecordInRegionMode(record, "default", "1", "", "table1",
"00000000000000000000000000000000", "host1:1000", "host1.apache.com,1000,1",
0L, 0L, 0L, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1,
new Size(100, Size.Unit.MEGABYTE), 0.1f, "", 100L, 100L, 100f,
"2019-07-22 00:00:00");
break;
case "table1,1,4.00000000000000000000000000000003.":
assertRecordInRegionMode(record, "default", "4", "", "table1",
"00000000000000000000000000000003", "host2:1001", "host2.apache.com,1001,2",
0L, 0L, 0L, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1,
new Size(100, Size.Unit.MEGABYTE), 0.4f, "1", 100L, 50L, 50f,
"2019-07-22 00:00:03");
break;
case "table2,,5.00000000000000000000000000000004.":
assertRecordInRegionMode(record, "default", "5", "", "table2",
"00000000000000000000000000000004", "host2:1001", "host2.apache.com,1001,2",
0L, 0L, 0L, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2,
new Size(200, Size.Unit.MEGABYTE), 0.5f, "", 200L, 150L, 75f,
"2019-07-22 00:00:04");
break;
case "table2,1,2.00000000000000000000000000000001.":
assertRecordInRegionMode(record, "default", "2", "", "table2",
"00000000000000000000000000000001", "host1:1000", "host1.apache.com,1000,1",
0L, 0L, 0L, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2,
new Size(200, Size.Unit.MEGABYTE), 0.2f, "1", 200L, 50L, 25f,
"2019-07-22 00:00:01");
break;
case "namespace:table3,,6.00000000000000000000000000000005.":
assertRecordInRegionMode(record, "namespace", "6", "", "table3",
"00000000000000000000000000000005", "host2:1001", "host2.apache.com,1001,2",
0L, 0L, 0L, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3,
new Size(300, Size.Unit.MEGABYTE), 0.6f, "", 300L, 200L, 66.66667f,
"2019-07-22 00:00:05");
break;
case "namespace:table3,,3_0001.00000000000000000000000000000002.":
assertRecordInRegionMode(record, "namespace", "3", "1", "table3",
"00000000000000000000000000000002", "host1:1000", "host1.apache.com,1000,1",
0L, 0L, 0L, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3,
new Size(300, Size.Unit.MEGABYTE), 0.3f, "", 300L, 100L, 33.333336f,
"2019-07-22 00:00:02");
break;
default:
fail();
}
}
}
private static void assertRecordInRegionMode(Record record, String namespace, String startCode,
String replicaId, String table, String region, String regionServer, String longRegionServer,
long requestCountPerSecond, long readRequestCountPerSecond, long writeCountRequestPerSecond,
Size storeFileSize, Size uncompressedStoreFileSize, int numStoreFiles,
Size memStoreSize, float locality, String startKey, long compactingCellCount,
long compactedCellCount, float compactionProgress, String lastMajorCompactionTime) {
assertEquals(21, record.size());
assertEquals(namespace, record.get(Field.NAMESPACE).asString());
assertEquals(startCode, record.get(Field.START_CODE).asString());
assertEquals(replicaId, record.get(Field.REPLICA_ID).asString());
assertEquals(table, record.get(Field.TABLE).asString());
assertEquals(region, record.get(Field.REGION).asString());
assertEquals(regionServer, record.get(Field.REGION_SERVER).asString());
assertEquals(longRegionServer, record.get(Field.LONG_REGION_SERVER).asString());
assertEquals(requestCountPerSecond, record.get(Field.REQUEST_COUNT_PER_SECOND).asLong());
assertEquals(readRequestCountPerSecond,
record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong());
assertEquals(writeCountRequestPerSecond,
record.get(Field.WRITE_REQUEST_COUNT_PER_SECOND).asLong());
assertEquals(storeFileSize, record.get(Field.STORE_FILE_SIZE).asSize());
assertEquals(uncompressedStoreFileSize,
record.get(Field.UNCOMPRESSED_STORE_FILE_SIZE).asSize());
assertEquals(numStoreFiles, record.get(Field.NUM_STORE_FILES).asInt());
assertEquals(record.get(Field.MEM_STORE_SIZE).asSize(), memStoreSize);
assertEquals(locality, record.get(Field.LOCALITY).asFloat(), 0.001);
assertEquals(startKey, record.get(Field.START_KEY).asString());
assertEquals(compactingCellCount, record.get(Field.COMPACTING_CELL_COUNT).asLong());
assertEquals(compactedCellCount, record.get(Field.COMPACTED_CELL_COUNT).asLong());
assertEquals(compactionProgress, record.get(Field.COMPACTION_PROGRESS).asFloat(), 0.001);
assertEquals(lastMajorCompactionTime,
record.get(Field.LAST_MAJOR_COMPACTION_TIME).asString());
}
public static void assertRecordsInNamespaceMode(List<Record> records) {
assertEquals(2, records.size());
for (Record record : records) {
switch (record.get(Field.NAMESPACE).asString()) {
case "default":
assertRecordInNamespaceMode(record, 0L, 0L, 0L, new Size(600, Size.Unit.MEGABYTE),
new Size(1200, Size.Unit.MEGABYTE), 6, new Size(600, Size.Unit.MEGABYTE), 4);
break;
case "namespace":
assertRecordInNamespaceMode(record, 0L, 0L, 0L, new Size(600, Size.Unit.MEGABYTE),
new Size(1200, Size.Unit.MEGABYTE), 6, new Size(600, Size.Unit.MEGABYTE), 2);
break;
default:
fail();
}
}
}
private static void assertRecordInNamespaceMode(Record record, long requestCountPerSecond,
long readRequestCountPerSecond, long writeCountRequestPerSecond, Size storeFileSize,
Size uncompressedStoreFileSize, int numStoreFiles, Size memStoreSize, int regionCount) {
assertEquals(9, record.size());
assertEquals(requestCountPerSecond, record.get(Field.REQUEST_COUNT_PER_SECOND).asLong());
assertEquals(readRequestCountPerSecond,
record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong());
assertEquals(writeCountRequestPerSecond,
record.get(Field.WRITE_REQUEST_COUNT_PER_SECOND).asLong());
assertEquals(storeFileSize, record.get(Field.STORE_FILE_SIZE).asSize());
assertEquals(uncompressedStoreFileSize,
record.get(Field.UNCOMPRESSED_STORE_FILE_SIZE).asSize());
assertEquals(numStoreFiles, record.get(Field.NUM_STORE_FILES).asInt());
assertEquals(memStoreSize, record.get(Field.MEM_STORE_SIZE).asSize());
assertEquals(regionCount, record.get(Field.REGION_COUNT).asInt());
}
public static void assertRecordsInTableMode(List<Record> records) {
assertEquals(3, records.size());
for (Record record : records) {
String tableName = String.format("%s:%s", record.get(Field.NAMESPACE).asString(),
record.get(Field.TABLE).asString());
switch (tableName) {
case "default:table1":
assertRecordInTableMode(record, 0L, 0L, 0L, new Size(200, Size.Unit.MEGABYTE),
new Size(400, Size.Unit.MEGABYTE), 2, new Size(200, Size.Unit.MEGABYTE), 2);
break;
case "default:table2":
assertRecordInTableMode(record, 0L, 0L, 0L, new Size(400, Size.Unit.MEGABYTE),
new Size(800, Size.Unit.MEGABYTE), 4, new Size(400, Size.Unit.MEGABYTE), 2);
break;
case "namespace:table3":
assertRecordInTableMode(record, 0L, 0L, 0L, new Size(600, Size.Unit.MEGABYTE),
new Size(1200, Size.Unit.MEGABYTE), 6, new Size(600, Size.Unit.MEGABYTE), 2);
break;
default:
fail();
}
}
}
private static void assertRecordInTableMode(Record record, long requestCountPerSecond,
long readRequestCountPerSecond, long writeCountRequestPerSecond, Size storeFileSize,
Size uncompressedStoreFileSize, int numStoreFiles, Size memStoreSize, int regionCount) {
assertEquals(10, record.size());
assertEquals(requestCountPerSecond, record.get(Field.REQUEST_COUNT_PER_SECOND).asLong());
assertEquals(readRequestCountPerSecond,
record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong());
assertEquals(writeCountRequestPerSecond,
record.get(Field.WRITE_REQUEST_COUNT_PER_SECOND).asLong());
assertEquals(storeFileSize, record.get(Field.STORE_FILE_SIZE).asSize());
assertEquals(uncompressedStoreFileSize,
record.get(Field.UNCOMPRESSED_STORE_FILE_SIZE).asSize());
assertEquals(numStoreFiles, record.get(Field.NUM_STORE_FILES).asInt());
assertEquals(memStoreSize, record.get(Field.MEM_STORE_SIZE).asSize());
assertEquals(regionCount, record.get(Field.REGION_COUNT).asInt());
}
public static void assertRecordsInRegionServerMode(List<Record> records) {
assertEquals(2, records.size());
for (Record record : records) {
switch (record.get(Field.REGION_SERVER).asString()) {
case "host1:1000":
assertRecordInRegionServerMode(record, "host1.apache.com,1000,1", 0L, 0L, 0L,
new Size(600, Size.Unit.MEGABYTE), new Size(1200, Size.Unit.MEGABYTE), 6,
new Size(600, Size.Unit.MEGABYTE), 3, new Size(100, Size.Unit.MEGABYTE),
new Size(200, Size.Unit.MEGABYTE));
break;
case "host2:1001":
assertRecordInRegionServerMode(record, "host2.apache.com,1001,2", 0L, 0L, 0L,
new Size(600, Size.Unit.MEGABYTE), new Size(1200, Size.Unit.MEGABYTE), 6,
new Size(600, Size.Unit.MEGABYTE), 3, new Size(16, Size.Unit.GIGABYTE),
new Size(32, Size.Unit.GIGABYTE));
break;
default:
fail();
}
}
}
private static void assertRecordInRegionServerMode(Record record, String longRegionServer,
long requestCountPerSecond, long readRequestCountPerSecond, long writeCountRequestPerSecond,
Size storeFileSize, Size uncompressedStoreFileSize, int numStoreFiles,
Size memStoreSize, int regionCount, Size usedHeapSize, Size maxHeapSize) {
assertEquals(12, record.size());
assertEquals(longRegionServer, record.get(Field.LONG_REGION_SERVER).asString());
assertEquals(requestCountPerSecond, record.get(Field.REQUEST_COUNT_PER_SECOND).asLong());
assertEquals(readRequestCountPerSecond,
record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong());
assertEquals(writeCountRequestPerSecond,
record.get(Field.WRITE_REQUEST_COUNT_PER_SECOND).asLong());
assertEquals(storeFileSize, record.get(Field.STORE_FILE_SIZE).asSize());
assertEquals(uncompressedStoreFileSize,
record.get(Field.UNCOMPRESSED_STORE_FILE_SIZE).asSize());
assertEquals(numStoreFiles, record.get(Field.NUM_STORE_FILES).asInt());
assertEquals(memStoreSize, record.get(Field.MEM_STORE_SIZE).asSize());
assertEquals(regionCount, record.get(Field.REGION_COUNT).asInt());
assertEquals(usedHeapSize, record.get(Field.USED_HEAP_SIZE).asSize());
assertEquals(maxHeapSize, record.get(Field.MAX_HEAP_SIZE).asSize());
}
public static void assertSummary(Summary summary) {
assertEquals(HBASE_VERSION, summary.getVersion());
assertEquals(CLUSTER_UUID, summary.getClusterId());
assertEquals(3, summary.getServers());
assertEquals(2, summary.getLiveServers());
assertEquals(1, summary.getDeadServers());
assertEquals(6, summary.getRegionCount());
assertEquals(1, summary.getRitCount());
assertEquals(3.0, summary.getAverageLoad(), 0.001);
assertEquals(300L, summary.getAggregateRequestPerSecond());
}
}

View File

@ -0,0 +1,290 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.field;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
 * Tests for {@code FieldValue}: parsing/accessor behavior per {@code FieldValueType},
 * ordering via {@code compareTo}/{@code compareToIgnoreCase}, addition via {@code plus},
 * and automatic unit normalization of SIZE values.
 */
@Category(SmallTests.class)
public class TestFieldValue {
/**
 * Verifies that each FieldValueType accepts the value representations it should
 * (native type or parseable String) and rejects mismatched ones with
 * IllegalArgumentException. The try/fail/catch pattern asserts that the
 * constructor throws.
 */
@Test
public void testParseAndAsSomethingMethod() {
// String
FieldValue stringFieldValue = new FieldValue("aaa", FieldValueType.STRING);
assertThat(stringFieldValue.asString(), is("aaa"));
try {
new FieldValue(1, FieldValueType.STRING);
fail();
} catch (IllegalArgumentException ignored) {
}
// Integer
FieldValue integerFieldValue = new FieldValue(100, FieldValueType.INTEGER);
assertThat(integerFieldValue.asInt(), is(100));
integerFieldValue = new FieldValue("100", FieldValueType.INTEGER);
assertThat(integerFieldValue.asInt(), is(100));
try {
new FieldValue("aaa", FieldValueType.INTEGER);
fail();
} catch (IllegalArgumentException ignored) {
}
// Long
FieldValue longFieldValue = new FieldValue(100L, FieldValueType.LONG);
assertThat(longFieldValue.asLong(), is(100L));
longFieldValue = new FieldValue("100", FieldValueType.LONG);
assertThat(longFieldValue.asLong(), is(100L));
try {
new FieldValue("aaa", FieldValueType.LONG);
fail();
} catch (IllegalArgumentException ignored) {
}
// Note: an int is NOT silently widened to LONG; it must be a Long or a String.
try {
new FieldValue(100, FieldValueType.LONG);
fail();
} catch (IllegalArgumentException ignored) {
}
// Float
FieldValue floatFieldValue = new FieldValue(1.0f, FieldValueType.FLOAT);
assertThat(floatFieldValue.asFloat(), is(1.0f));
floatFieldValue = new FieldValue("1", FieldValueType.FLOAT);
assertThat(floatFieldValue.asFloat(), is(1.0f));
try {
new FieldValue("aaa", FieldValueType.FLOAT);
fail();
} catch (IllegalArgumentException ignored) {
}
// An int is likewise rejected for FLOAT.
try {
new FieldValue(1, FieldValueType.FLOAT);
fail();
} catch (IllegalArgumentException ignored) {
}
// Size: accepts a Size object or a String with a unit suffix (e.g. "100MB");
// a bare number string is rejected.
FieldValue sizeFieldValue =
new FieldValue(new Size(100, Size.Unit.MEGABYTE), FieldValueType.SIZE);
assertThat(sizeFieldValue.asString(), is("100.0MB"));
assertThat(sizeFieldValue.asSize(), is(new Size(100, Size.Unit.MEGABYTE)));
sizeFieldValue = new FieldValue("100MB", FieldValueType.SIZE);
assertThat(sizeFieldValue.asString(), is("100.0MB"));
assertThat(sizeFieldValue.asSize(), is(new Size(100, Size.Unit.MEGABYTE)));
try {
new FieldValue("100", FieldValueType.SIZE);
fail();
} catch (IllegalArgumentException ignored) {
}
try {
new FieldValue(100, FieldValueType.SIZE);
fail();
} catch (IllegalArgumentException ignored) {
}
// Percent: accepts a float, or a String with or without a trailing '%'.
FieldValue percentFieldValue =
new FieldValue(100f, FieldValueType.PERCENT);
assertThat(percentFieldValue.asString(), is("100.00%"));
assertThat(percentFieldValue.asFloat(), is(100f));
percentFieldValue = new FieldValue("100%", FieldValueType.PERCENT);
assertThat(percentFieldValue.asString(), is("100.00%"));
assertThat(percentFieldValue.asFloat(), is(100f));
percentFieldValue = new FieldValue("100", FieldValueType.PERCENT);
assertThat(percentFieldValue.asString(), is("100.00%"));
assertThat(percentFieldValue.asFloat(), is(100f));
try {
new FieldValue(100, FieldValueType.PERCENT);
fail();
} catch (IllegalArgumentException ignored) {
}
}
/**
 * Verifies the total ordering of FieldValues of the same type. Note the String
 * comparison is case-sensitive: 'a' (0x61) minus 'A' (0x41) yields 32.
 */
@Test
public void testCompareTo() {
// String
FieldValue stringAFieldValue = new FieldValue("a", FieldValueType.STRING);
FieldValue stringAFieldValue2 = new FieldValue("a", FieldValueType.STRING);
FieldValue stringBFieldValue = new FieldValue("b", FieldValueType.STRING);
FieldValue stringCapitalAFieldValue = new FieldValue("A", FieldValueType.STRING);
assertThat(stringAFieldValue.compareTo(stringAFieldValue2), is(0));
assertThat(stringBFieldValue.compareTo(stringAFieldValue), is(1));
assertThat(stringAFieldValue.compareTo(stringBFieldValue), is(-1));
assertThat(stringAFieldValue.compareTo(stringCapitalAFieldValue), is(32));
// Integer
FieldValue integer1FieldValue = new FieldValue(1, FieldValueType.INTEGER);
FieldValue integer1FieldValue2 = new FieldValue(1, FieldValueType.INTEGER);
FieldValue integer2FieldValue = new FieldValue(2, FieldValueType.INTEGER);
assertThat(integer1FieldValue.compareTo(integer1FieldValue2), is(0));
assertThat(integer2FieldValue.compareTo(integer1FieldValue), is(1));
assertThat(integer1FieldValue.compareTo(integer2FieldValue), is(-1));
// Long
FieldValue long1FieldValue = new FieldValue(1L, FieldValueType.LONG);
FieldValue long1FieldValue2 = new FieldValue(1L, FieldValueType.LONG);
FieldValue long2FieldValue = new FieldValue(2L, FieldValueType.LONG);
assertThat(long1FieldValue.compareTo(long1FieldValue2), is(0));
assertThat(long2FieldValue.compareTo(long1FieldValue), is(1));
assertThat(long1FieldValue.compareTo(long2FieldValue), is(-1));
// Float
FieldValue float1FieldValue = new FieldValue(1.0f, FieldValueType.FLOAT);
FieldValue float1FieldValue2 = new FieldValue(1.0f, FieldValueType.FLOAT);
FieldValue float2FieldValue = new FieldValue(2.0f, FieldValueType.FLOAT);
assertThat(float1FieldValue.compareTo(float1FieldValue2), is(0));
assertThat(float2FieldValue.compareTo(float1FieldValue), is(1));
assertThat(float1FieldValue.compareTo(float2FieldValue), is(-1));
// Size
FieldValue size100MBFieldValue =
new FieldValue(new Size(100, Size.Unit.MEGABYTE), FieldValueType.SIZE);
FieldValue size100MBFieldValue2 =
new FieldValue(new Size(100, Size.Unit.MEGABYTE), FieldValueType.SIZE);
FieldValue size200MBFieldValue =
new FieldValue(new Size(200, Size.Unit.MEGABYTE), FieldValueType.SIZE);
assertThat(size100MBFieldValue.compareTo(size100MBFieldValue2), is(0));
assertThat(size200MBFieldValue.compareTo(size100MBFieldValue), is(1));
assertThat(size100MBFieldValue.compareTo(size200MBFieldValue), is(-1));
// Percent
FieldValue percent50FieldValue = new FieldValue(50.0f, FieldValueType.PERCENT);
FieldValue percent50FieldValue2 = new FieldValue(50.0f, FieldValueType.PERCENT);
FieldValue percent100FieldValue = new FieldValue(100.0f, FieldValueType.PERCENT);
assertThat(percent50FieldValue.compareTo(percent50FieldValue2), is(0));
assertThat(percent100FieldValue.compareTo(percent50FieldValue), is(1));
assertThat(percent50FieldValue.compareTo(percent100FieldValue), is(-1));
}
/**
 * Verifies plus() for each type: String concatenates, numeric types add, SIZE
 * and PERCENT add and re-render through their formatted string form.
 */
@Test
public void testPlus() {
// String
FieldValue stringFieldValue = new FieldValue("a", FieldValueType.STRING);
FieldValue stringFieldValue2 = new FieldValue("b", FieldValueType.STRING);
assertThat(stringFieldValue.plus(stringFieldValue2).asString(), is("ab"));
// Integer
FieldValue integerFieldValue = new FieldValue(1, FieldValueType.INTEGER);
FieldValue integerFieldValue2 = new FieldValue(2, FieldValueType.INTEGER);
assertThat(integerFieldValue.plus(integerFieldValue2).asInt(), is(3));
// Long
FieldValue longFieldValue = new FieldValue(1L, FieldValueType.LONG);
FieldValue longFieldValue2 = new FieldValue(2L, FieldValueType.LONG);
assertThat(longFieldValue.plus(longFieldValue2).asLong(), is(3L));
// Float
FieldValue floatFieldValue = new FieldValue(1.2f, FieldValueType.FLOAT);
FieldValue floatFieldValue2 = new FieldValue(2.2f, FieldValueType.FLOAT);
assertThat(floatFieldValue.plus(floatFieldValue2).asFloat(), is(3.4f));
// Size
FieldValue sizeFieldValue =
new FieldValue(new Size(100, Size.Unit.MEGABYTE), FieldValueType.SIZE);
FieldValue sizeFieldValue2 =
new FieldValue(new Size(200, Size.Unit.MEGABYTE), FieldValueType.SIZE);
assertThat(sizeFieldValue.plus(sizeFieldValue2).asString(), is("300.0MB"));
assertThat(sizeFieldValue.plus(sizeFieldValue2).asSize(),
is(new Size(300, Size.Unit.MEGABYTE)));
// Percent
FieldValue percentFieldValue = new FieldValue(30f, FieldValueType.PERCENT);
FieldValue percentFieldValue2 = new FieldValue(60f, FieldValueType.PERCENT);
assertThat(percentFieldValue.plus(percentFieldValue2).asString(), is("90.00%"));
assertThat(percentFieldValue.plus(percentFieldValue2).asFloat(), is(90f));
}
/**
 * Verifies case-insensitive comparison of STRING FieldValues: "a" equals "A",
 * and ordering otherwise follows the case-folded values.
 */
@Test
public void testCompareToIgnoreCase() {
FieldValue stringAFieldValue = new FieldValue("a", FieldValueType.STRING);
FieldValue stringCapitalAFieldValue = new FieldValue("A", FieldValueType.STRING);
FieldValue stringCapitalBFieldValue = new FieldValue("B", FieldValueType.STRING);
assertThat(stringAFieldValue.compareToIgnoreCase(stringCapitalAFieldValue), is(0));
assertThat(stringCapitalBFieldValue.compareToIgnoreCase(stringAFieldValue), is(1));
assertThat(stringAFieldValue.compareToIgnoreCase(stringCapitalBFieldValue), is(-1));
}
/**
 * Verifies that SIZE values are normalized to the largest unit with a value
 * >= 1 when rendered (1024B -> 1.0KB, 2048KB -> 2.0MB, ...), and that PB is
 * the ceiling unit (values never promote past petabytes).
 */
@Test
public void testOptimizeSize() {
FieldValue sizeFieldValue =
new FieldValue(new Size(1, Size.Unit.BYTE), FieldValueType.SIZE);
assertThat(sizeFieldValue.asString(), is("1.0B"));
sizeFieldValue =
new FieldValue(new Size(1024, Size.Unit.BYTE), FieldValueType.SIZE);
assertThat(sizeFieldValue.asString(), is("1.0KB"));
sizeFieldValue =
new FieldValue(new Size(2 * 1024, Size.Unit.BYTE), FieldValueType.SIZE);
assertThat(sizeFieldValue.asString(), is("2.0KB"));
sizeFieldValue =
new FieldValue(new Size(2 * 1024, Size.Unit.KILOBYTE), FieldValueType.SIZE);
assertThat(sizeFieldValue.asString(), is("2.0MB"));
sizeFieldValue =
new FieldValue(new Size(1024 * 1024, Size.Unit.KILOBYTE), FieldValueType.SIZE);
assertThat(sizeFieldValue.asString(), is("1.0GB"));
sizeFieldValue =
new FieldValue(new Size(2 * 1024 * 1024, Size.Unit.MEGABYTE), FieldValueType.SIZE);
assertThat(sizeFieldValue.asString(), is("2.0TB"));
sizeFieldValue =
new FieldValue(new Size(2 * 1024, Size.Unit.TERABYTE), FieldValueType.SIZE);
assertThat(sizeFieldValue.asString(), is("2.0PB"));
// No unit beyond PB: large terabyte values saturate as petabytes.
sizeFieldValue =
new FieldValue(new Size(1024 * 1024, Size.Unit.TERABYTE), FieldValueType.SIZE);
assertThat(sizeFieldValue.asString(), is("1024.0PB"));
sizeFieldValue =
new FieldValue(new Size(1, Size.Unit.PETABYTE), FieldValueType.SIZE);
assertThat(sizeFieldValue.asString(), is("1.0PB"));
sizeFieldValue =
new FieldValue(new Size(1024, Size.Unit.PETABYTE), FieldValueType.SIZE);
assertThat(sizeFieldValue.asString(), is("1024.0PB"));
}
}

View File

@ -0,0 +1,80 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.field;
import static org.junit.Assert.assertEquals;
import java.util.Set;
import java.util.TreeSet;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
 * Tests for {@code Size}: unit conversion, natural ordering, and equality
 * across different unit representations of the same quantity.
 */
@Category({MiscTests.class, SmallTests.class})
public class TestSize {

  /** 1024 MB converts to 1 GB, 1024*1024 KB, and 1024^3 bytes. */
  @Test
  public void testConversion() {
    Size size = new Size(1024D, Size.Unit.MEGABYTE);
    assertEquals(1D, size.get(Size.Unit.GIGABYTE), 0);
    assertEquals(1024D, size.get(), 0);
    assertEquals(1024D * 1024D, size.get(Size.Unit.KILOBYTE), 0);
    assertEquals(1024D * 1024D * 1024D, size.get(Size.Unit.BYTE), 0);
  }

  /** A TreeSet of Sizes iterates in ascending order of the actual quantity. */
  @Test
  public void testCompare() {
    Size large = new Size(100D, Size.Unit.GIGABYTE);
    Size medium = new Size(100D, Size.Unit.MEGABYTE);
    Size small = new Size(100D, Size.Unit.BYTE);

    Set<Size> ordered = new TreeSet<>();
    ordered.add(large);
    ordered.add(medium);
    ordered.add(small);

    // Smallest quantity first, regardless of insertion order.
    Size[] expected = { small, medium, large };
    int index = 0;
    for (Size actual : ordered) {
      assertEquals(expected[index++], actual);
    }
    assertEquals(3, index);
  }

  /** Equality holds across units: 1024 of a unit equals 1 of the next unit up. */
  @Test
  public void testEqual() {
    assertEquals(new Size(1024D, Size.Unit.TERABYTE),
      new Size(1D, Size.Unit.PETABYTE));
    assertEquals(new Size(1024D, Size.Unit.GIGABYTE),
      new Size(1D, Size.Unit.TERABYTE));
    assertEquals(new Size(1024D, Size.Unit.MEGABYTE),
      new Size(1D, Size.Unit.GIGABYTE));
    assertEquals(new Size(1024D, Size.Unit.KILOBYTE),
      new Size(1D, Size.Unit.MEGABYTE));
    assertEquals(new Size(1024D, Size.Unit.BYTE),
      new Size(1D, Size.Unit.KILOBYTE));
  }
}

View File

@ -0,0 +1,46 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.mode;
import java.util.List;
import org.apache.hadoop.hbase.hbtop.Record;
import org.apache.hadoop.hbase.hbtop.TestUtils;
import org.junit.Test;
/**
 * Base class for per-mode tests. Runs every mode against the shared dummy
 * cluster status and delegates the assertions to the concrete subclass.
 */
public abstract class TestModeBase {

  /** The mode under test, supplied by the subclass. */
  protected abstract Mode getMode();

  /** Subclass hook: assert on the records produced for the dummy cluster status. */
  protected abstract void assertRecords(List<Record> records);

  /** Subclass hook: assert on the drill-down destination for each record. */
  protected abstract void assertDrillDown(Record currentRecord, DrillDownInfo drillDownInfo);

  @Test
  public void testGetRecords() {
    Mode mode = getMode();
    assertRecords(mode.getRecords(TestUtils.createDummyClusterStatus()));
  }

  @Test
  public void testDrillDown() {
    Mode mode = getMode();
    for (Record record : mode.getRecords(TestUtils.createDummyClusterStatus())) {
      assertDrillDown(record, mode.drillDown(record));
    }
  }
}

View File

@ -0,0 +1,63 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.mode;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
import java.util.List;
import org.apache.hadoop.hbase.hbtop.Record;
import org.apache.hadoop.hbase.hbtop.TestUtils;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.experimental.categories.Category;
/** Tests NAMESPACE mode records and its drill-down into TABLE mode. */
@Category(SmallTests.class)
public class TestNamespaceMode extends TestModeBase {

  @Override
  protected Mode getMode() {
    return Mode.NAMESPACE;
  }

  @Override
  protected void assertRecords(List<Record> records) {
    TestUtils.assertRecordsInNamespaceMode(records);
  }

  @Override
  protected void assertDrillDown(Record currentRecord, DrillDownInfo drillDownInfo) {
    // Drilling down from a namespace always lands on TABLE mode with exactly
    // one pre-applied namespace filter.
    assertThat(drillDownInfo.getNextMode(), is(Mode.TABLE));
    assertThat(drillDownInfo.getInitialFilters().size(), is(1));

    String namespace = currentRecord.get(Field.NAMESPACE).asString();
    String filter = drillDownInfo.getInitialFilters().get(0).toString();
    if ("default".equals(namespace)) {
      assertThat(filter, is("NAMESPACE==default"));
    } else if ("namespace".equals(namespace)) {
      assertThat(filter, is("NAMESPACE==namespace"));
    } else {
      fail();
    }
  }
}

View File

@ -0,0 +1,47 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.mode;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.junit.Assert.assertThat;
import java.util.List;
import org.apache.hadoop.hbase.hbtop.Record;
import org.apache.hadoop.hbase.hbtop.TestUtils;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.experimental.categories.Category;
/** Tests REGION mode records; regions are the finest granularity. */
@Category(SmallTests.class)
public class TestRegionMode extends TestModeBase {

  @Override
  protected Mode getMode() {
    return Mode.REGION;
  }

  @Override
  protected void assertRecords(List<Record> records) {
    TestUtils.assertRecordsInRegionMode(records);
  }

  @Override
  protected void assertDrillDown(Record currentRecord, DrillDownInfo drillDownInfo) {
    // There is no level below REGION, so drill-down yields nothing.
    assertThat(drillDownInfo, is(nullValue()));
  }
}

View File

@ -0,0 +1,62 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.mode;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
import java.util.List;
import org.apache.hadoop.hbase.hbtop.Record;
import org.apache.hadoop.hbase.hbtop.TestUtils;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.experimental.categories.Category;
/** Tests REGION_SERVER mode records and its drill-down into REGION mode. */
@Category(SmallTests.class)
public class TestRegionServerMode extends TestModeBase {

  @Override
  protected Mode getMode() {
    return Mode.REGION_SERVER;
  }

  @Override
  protected void assertRecords(List<Record> records) {
    TestUtils.assertRecordsInRegionServerMode(records);
  }

  @Override
  protected void assertDrillDown(Record currentRecord, DrillDownInfo drillDownInfo) {
    // Drilling down from a region server lands on REGION mode with exactly
    // one pre-applied server filter.
    assertThat(drillDownInfo.getNextMode(), is(Mode.REGION));
    assertThat(drillDownInfo.getInitialFilters().size(), is(1));

    String server = currentRecord.get(Field.REGION_SERVER).asString();
    String filter = drillDownInfo.getInitialFilters().get(0).toString();
    if ("host1:1000".equals(server)) {
      assertThat(filter, is("RS==host1:1000"));
    } else if ("host2:1001".equals(server)) {
      assertThat(filter, is("RS==host2:1001"));
    } else {
      fail();
    }
  }
}

View File

@ -0,0 +1,49 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.mode;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
 * Tests {@code RequestCountPerSecond}: per-second rates derived from
 * successive (timestamp, readCount, writeCount) refreshes.
 */
@Category(SmallTests.class)
public class TestRequestCountPerSecond {

  @Test
  public void test() {
    RequestCountPerSecond counter = new RequestCountPerSecond();

    // The first refresh only establishes a baseline; all rates are still zero.
    counter.refresh(1000, 300, 200);
    assertThat(counter.getRequestCountPerSecond(), is(0L));
    assertThat(counter.getReadRequestCountPerSecond(), is(0L));
    assertThat(counter.getWriteRequestCountPerSecond(), is(0L));

    // 1 second later: +1000 reads and +1000 writes => 2000 req/s total.
    counter.refresh(2000, 1300, 1200);
    assertThat(counter.getRequestCountPerSecond(), is(2000L));
    assertThat(counter.getReadRequestCountPerSecond(), is(1000L));
    assertThat(counter.getWriteRequestCountPerSecond(), is(1000L));

    // 10 seconds later: +4000 reads and +1000 writes => 500 req/s total.
    counter.refresh(12000, 5300, 2200);
    assertThat(counter.getRequestCountPerSecond(), is(500L));
    assertThat(counter.getReadRequestCountPerSecond(), is(400L));
    assertThat(counter.getWriteRequestCountPerSecond(), is(100L));
  }
}

View File

@ -0,0 +1,73 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.mode;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
import java.util.List;
import org.apache.hadoop.hbase.hbtop.Record;
import org.apache.hadoop.hbase.hbtop.TestUtils;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.experimental.categories.Category;
/** Tests TABLE mode records and its drill-down into REGION mode. */
@Category(SmallTests.class)
public class TestTableMode extends TestModeBase {

  @Override
  protected Mode getMode() {
    return Mode.TABLE;
  }

  @Override
  protected void assertRecords(List<Record> records) {
    TestUtils.assertRecordsInTableMode(records);
  }

  @Override
  protected void assertDrillDown(Record currentRecord, DrillDownInfo drillDownInfo) {
    // Drilling down from a table lands on REGION mode with two pre-applied
    // filters: the namespace and the table name.
    assertThat(drillDownInfo.getNextMode(), is(Mode.REGION));
    assertThat(drillDownInfo.getInitialFilters().size(), is(2));

    String tableName = String.format("%s:%s", currentRecord.get(Field.NAMESPACE).asString(),
      currentRecord.get(Field.TABLE).asString());
    String namespaceFilter = drillDownInfo.getInitialFilters().get(0).toString();
    String tableFilter = drillDownInfo.getInitialFilters().get(1).toString();

    if ("default:table1".equals(tableName)) {
      assertThat(namespaceFilter, is("NAMESPACE==default"));
      assertThat(tableFilter, is("TABLE==table1"));
    } else if ("default:table2".equals(tableName)) {
      assertThat(namespaceFilter, is("NAMESPACE==default"));
      assertThat(tableFilter, is("TABLE==table2"));
    } else if ("namespace:table3".equals(tableName)) {
      assertThat(namespaceFilter, is("NAMESPACE==namespace"));
      assertThat(tableFilter, is("TABLE==table3"));
    } else {
      fail();
    }
  }
}

View File

@ -0,0 +1,151 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.field;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyBoolean;
import static org.mockito.Mockito.anyInt;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.verify;
import java.util.ArrayList;
import java.util.EnumMap;
import java.util.List;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.hbtop.field.FieldInfo;
import org.apache.hadoop.hbase.hbtop.mode.Mode;
import org.apache.hadoop.hbase.hbtop.screen.ScreenView;
import org.apache.hadoop.hbase.hbtop.screen.top.TopScreenView;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.mockito.InOrder;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
/**
 * Tests for {@code FieldScreenPresenter} (the field-selection screen) using
 * mocked views. Built around REGION mode's field list and its default sort
 * field.
 */
@Category(SmallTests.class)
@RunWith(MockitoJUnitRunner.class)
public class TestFieldScreenPresenter {
@Mock
private FieldScreenView fieldScreenView;
// Index of the default sort field within the REGION-mode field list;
// computed in setup(), -1 until then.
private int sortFieldPosition = -1;
private List<Field> fields;
private EnumMap<Field, Boolean> fieldDisplayMap;
@Mock
private FieldScreenPresenter.ResultListener resultListener;
@Mock
private TopScreenView topScreenView;
private FieldScreenPresenter fieldScreenPresenter;
/**
 * Builds the field list and display map from REGION mode's FieldInfos,
 * creates the presenter, then locates the default sort field's position.
 */
@Before
public void setup() {
Field sortField = Mode.REGION.getDefaultSortField();
fields = new ArrayList<>();
for (FieldInfo fieldInfo : Mode.REGION.getFieldInfos()) {
fields.add(fieldInfo.getField());
}
fieldDisplayMap = new EnumMap<>(Field.class);
for (FieldInfo fieldInfo : Mode.REGION.getFieldInfos()) {
fieldDisplayMap.put(fieldInfo.getField(), fieldInfo.isDisplayByDefault());
}
fieldScreenPresenter =
new FieldScreenPresenter(fieldScreenView, sortField, fields, fieldDisplayMap, resultListener,
topScreenView);
for (int i = 0; i < fields.size(); i++) {
Field field = fields.get(i);
if (field == sortField) {
sortFieldPosition = i;
break;
}
}
}
/**
 * init() should show the field screen with REGION's default sort field
 * header ("#REQ/S") and the expected max header/description widths.
 */
@Test
public void testInit() {
fieldScreenPresenter.init();
// The widest header / description among REGION-mode fields, per this test's
// expectation of the presenter's width computation.
int modeHeaderMaxLength = "#COMPingCell".length();
int modeDescriptionMaxLength = "Write Request Count per second".length();
verify(fieldScreenView).showFieldScreen(eq("#REQ/S"), eq(fields), eq(fieldDisplayMap),
eq(sortFieldPosition), eq(modeHeaderMaxLength), eq(modeDescriptionMaxLength), eq(false));
}
/**
 * Moving the cursor (arrow/page keys) and pressing "set sort field" should
 * update the screen description with the newly selected field's header, in
 * navigation order.
 */
@Test
public void testChangeSortField() {
fieldScreenPresenter.arrowUp();
fieldScreenPresenter.setSortField();
fieldScreenPresenter.arrowDown();
fieldScreenPresenter.arrowDown();
fieldScreenPresenter.setSortField();
fieldScreenPresenter.pageUp();
fieldScreenPresenter.setSortField();
fieldScreenPresenter.pageDown();
fieldScreenPresenter.setSortField();
InOrder inOrder = inOrder(fieldScreenView);
inOrder.verify(fieldScreenView).showScreenDescription(eq("LRS"));
inOrder.verify(fieldScreenView).showScreenDescription(eq("#READ/S"));
// pageUp jumps to the first field, pageDown to the last.
inOrder.verify(fieldScreenView).showScreenDescription(eq(fields.get(0).getHeader()));
inOrder.verify(fieldScreenView).showScreenDescription(
eq(fields.get(fields.size() - 1).getHeader()));
}
/**
 * Toggling a field's display twice should first hide it (display=false)
 * and then show it again (display=true).
 */
@Test
public void testSwitchFieldDisplay() {
fieldScreenPresenter.switchFieldDisplay();
fieldScreenPresenter.switchFieldDisplay();
InOrder inOrder = inOrder(fieldScreenView);
inOrder.verify(fieldScreenView).showField(anyInt(), any(Field.class), eq(false),
anyBoolean(), anyInt(), anyInt(), anyBoolean());
inOrder.verify(fieldScreenView).showField(anyInt(), any(Field.class), eq(true),
anyBoolean(), anyInt(), anyInt(), anyBoolean());
}
/**
 * Move mode + arrowUp should shift the current (sort) field one position
 * earlier; leaving the screen reports the reordered list to the listener
 * and transitions back to the top screen.
 */
@Test
@SuppressWarnings("unchecked")
public void testChangeFieldsOrder() {
fieldScreenPresenter.turnOnMoveMode();
fieldScreenPresenter.arrowUp();
fieldScreenPresenter.turnOffMoveMode();
// Mirror the expected move locally: the sort field moves up by one.
Field removed = fields.remove(sortFieldPosition);
fields.add(sortFieldPosition - 1, removed);
assertThat(fieldScreenPresenter.transitionToNextScreen(), is((ScreenView) topScreenView));
verify(resultListener).accept(any(Field.class), eq(fields), any(EnumMap.class));
}
}

View File

@ -0,0 +1,75 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.help;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import static org.mockito.Mockito.argThat;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.verify;
import org.apache.hadoop.hbase.hbtop.screen.ScreenView;
import org.apache.hadoop.hbase.hbtop.screen.top.TopScreenView;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.mockito.ArgumentMatcher;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
/**
 * Tests for {@code HelpScreenPresenter} using mocked views: the help screen
 * shows the refresh delay plus the command descriptions, and returning from
 * it goes back to the top screen.
 */
@Category(SmallTests.class)
@RunWith(MockitoJUnitRunner.class)
public class TestHelpScreenPresenter {
// Arbitrary refresh delay passed through to the view.
private static final long TEST_REFRESH_DELAY = 5;
@Mock
private HelpScreenView helpScreenView;
@Mock
private TopScreenView topScreenView;
private HelpScreenPresenter helpScreenPresenter;
@Before
public void setup() {
helpScreenPresenter = new HelpScreenPresenter(helpScreenView, TEST_REFRESH_DELAY,
topScreenView);
}
/**
 * init() should show the help screen with the configured refresh delay and
 * exactly 14 command descriptions (the matcher only checks the count).
 */
@Test
public void testInit() {
helpScreenPresenter.init();
verify(helpScreenView).showHelpScreen(eq(TEST_REFRESH_DELAY), argThat(
new ArgumentMatcher<CommandDescription[]>() {
@Override
public boolean matches(Object o) {
return ((CommandDescription[]) o).length == 14;
}
}));
}
/** Any key on the help screen transitions back to the top screen. */
@Test
public void testTransitionToTopScreen() {
assertThat(helpScreenPresenter.transitionToNextScreen(), is((ScreenView) topScreenView));
}
}

View File

@ -0,0 +1,140 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.mode;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import java.util.Arrays;
import org.apache.hadoop.hbase.hbtop.mode.Mode;
import org.apache.hadoop.hbase.hbtop.screen.ScreenView;
import org.apache.hadoop.hbase.hbtop.screen.top.TopScreenView;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
/** Test for {@link ModeScreenPresenter}: cursor navigation and mode selection. */
@Category(SmallTests.class)
@RunWith(MockitoJUnitRunner.class)
public class TestModeScreenPresenter {

  @Mock
  private ModeScreenView modeScreenView;

  @Mock
  private TopScreenView topScreenView;

  @Mock
  private ModeScreenPresenter.ResultListener resultListener;

  /** Builds a presenter whose selection starts on {@code currentMode}. */
  private ModeScreenPresenter createModeScreenPresenter(Mode currentMode) {
    return new ModeScreenPresenter(modeScreenView, currentMode, resultListener, topScreenView);
  }

  @Test
  public void testInit() {
    ModeScreenPresenter presenter = createModeScreenPresenter(Mode.REGION);
    presenter.init();

    // NOTE(review): assumes REGION_SERVER has the longest header and
    // description among all modes — confirm if the Mode enum changes.
    int headerMaxLength = Mode.REGION_SERVER.getHeader().length();
    int descriptionMaxLength = Mode.REGION_SERVER.getDescription().length();

    // The view is asked to render all modes with the current one selected.
    verify(modeScreenView).showModeScreen(eq(Mode.REGION), eq(Arrays.asList(Mode.values())),
      eq(Mode.REGION.ordinal()), eq(headerMaxLength), eq(descriptionMaxLength));
  }

  @Test
  public void testSelectNamespaceMode() {
    // Two entries above REGION is NAMESPACE.
    ModeScreenPresenter presenter = createModeScreenPresenter(Mode.REGION);
    presenter.arrowUp();
    presenter.arrowUp();
    assertThat(presenter.transitionToNextScreen(true), is((ScreenView) topScreenView));
    verify(resultListener).accept(eq(Mode.NAMESPACE));
  }

  @Test
  public void testSelectTableMode() {
    // One entry above REGION is TABLE.
    ModeScreenPresenter presenter = createModeScreenPresenter(Mode.REGION);
    presenter.arrowUp();
    assertThat(presenter.transitionToNextScreen(true), is((ScreenView) topScreenView));
    verify(resultListener).accept(eq(Mode.TABLE));
  }

  @Test
  public void testSelectRegionMode() {
    // Two entries below NAMESPACE is REGION.
    ModeScreenPresenter presenter = createModeScreenPresenter(Mode.NAMESPACE);
    presenter.arrowDown();
    presenter.arrowDown();
    assertThat(presenter.transitionToNextScreen(true), is((ScreenView) topScreenView));
    verify(resultListener).accept(eq(Mode.REGION));
  }

  @Test
  public void testSelectRegionServerMode() {
    // One entry below REGION is REGION_SERVER.
    ModeScreenPresenter presenter = createModeScreenPresenter(Mode.REGION);
    presenter.arrowDown();
    assertThat(presenter.transitionToNextScreen(true), is((ScreenView) topScreenView));
    verify(resultListener).accept(eq(Mode.REGION_SERVER));
  }

  @Test
  public void testCancelSelectingMode() {
    // Transitioning with changeMode=false must not report any selection.
    ModeScreenPresenter presenter = createModeScreenPresenter(Mode.REGION);
    presenter.arrowDown();
    presenter.arrowDown();
    assertThat(presenter.transitionToNextScreen(false), is((ScreenView) topScreenView));
    verify(resultListener, never()).accept(any(Mode.class));
  }

  @Test
  public void testPageUp() {
    // Page-up jumps the cursor to the first mode in the list.
    ModeScreenPresenter presenter = createModeScreenPresenter(Mode.REGION);
    presenter.pageUp();
    assertThat(presenter.transitionToNextScreen(true), is((ScreenView) topScreenView));
    verify(resultListener).accept(eq(Mode.values()[0]));
  }

  @Test
  public void testPageDown() {
    // Page-down jumps the cursor to the last mode in the list.
    ModeScreenPresenter presenter = createModeScreenPresenter(Mode.REGION);
    presenter.pageDown();
    assertThat(presenter.transitionToNextScreen(true), is((ScreenView) topScreenView));
    Mode[] allModes = Mode.values();
    verify(resultListener).accept(eq(allModes[allModes.length - 1]));
  }
}

View File

@ -0,0 +1,91 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.top;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import static org.mockito.Mockito.argThat;
import static org.mockito.Mockito.verify;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.hbtop.RecordFilter;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.hbtop.field.FieldInfo;
import org.apache.hadoop.hbase.hbtop.mode.Mode;
import org.apache.hadoop.hbase.hbtop.screen.ScreenView;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.mockito.ArgumentMatcher;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
/** Test for {@link FilterDisplayModeScreenPresenter}. */
@Category(SmallTests.class)
@RunWith(MockitoJUnitRunner.class)
public class TestFilterDisplayModeScreenPresenter {

  @Mock
  private FilterDisplayModeScreenView filterDisplayModeScreenView;

  @Mock
  private TopScreenView topScreenView;

  private FilterDisplayModeScreenPresenter filterDisplayModeScreenPresenter;

  @Before
  public void setup() {
    // Collect the REGION-mode fields; filters are parsed against them.
    List<Field> regionFields = new ArrayList<>();
    for (FieldInfo info : Mode.REGION.getFieldInfos()) {
      regionFields.add(info.getField());
    }

    // Two filters whose string form is verified in testInit().
    List<RecordFilter> initialFilters = new ArrayList<>();
    initialFilters.add(RecordFilter.parse("NAMESPACE==namespace", regionFields, true));
    initialFilters.add(RecordFilter.parse("TABLE==table", regionFields, true));

    filterDisplayModeScreenPresenter = new FilterDisplayModeScreenPresenter(
      filterDisplayModeScreenView, initialFilters, topScreenView);
  }

  @Test
  public void testInit() {
    filterDisplayModeScreenPresenter.init();

    // The view must be shown exactly the two filters passed to the presenter,
    // in order.
    verify(filterDisplayModeScreenView).showFilters(argThat(
      new ArgumentMatcher<List<RecordFilter>>() {
        @Override
        @SuppressWarnings("unchecked")
        public boolean matches(Object argument) {
          List<RecordFilter> shown = (List<RecordFilter>) argument;
          if (shown.size() != 2) {
            return false;
          }
          return shown.get(0).toString().equals("NAMESPACE==namespace")
            && shown.get(1).toString().equals("TABLE==table");
        }
      }));
  }

  @Test
  public void testReturnToTopScreen() {
    // Dismissing the filter display always returns to the top screen.
    assertThat(filterDisplayModeScreenPresenter.returnToNextScreen(),
      is((ScreenView) topScreenView));
  }
}

View File

@ -0,0 +1,198 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.top;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.hbtop.screen.ScreenView;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.mockito.InOrder;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
/**
 * Test for {@link InputModeScreenPresenter}: line editing (typing, cursor
 * movement, deletion) and navigation through the input history.
 */
@Category(SmallTests.class)
@RunWith(MockitoJUnitRunner.class)
public class TestInputModeScreenPresenter {

  private static final String TEST_INPUT_MESSAGE = "test input message";

  @Mock
  private InputModeScreenView inputModeScreenView;

  @Mock
  private TopScreenView topScreenView;

  @Mock
  private InputModeScreenPresenter.ResultListener resultListener;

  private InputModeScreenPresenter inputModeScreenPresenter;

  @Before
  public void setup() {
    // Two pre-existing history entries; arrowUp/arrowDown cycle through them.
    List<String> histories = new ArrayList<>();
    histories.add("history1");
    histories.add("history2");

    inputModeScreenPresenter = new InputModeScreenPresenter(inputModeScreenView,
      TEST_INPUT_MESSAGE, histories, resultListener);
  }

  @Test
  public void testInit() {
    // Starts with an empty input line and the cursor at position 0.
    inputModeScreenPresenter.init();
    verify(inputModeScreenView).showInput(eq(TEST_INPUT_MESSAGE), eq(""), eq(0));
  }

  @Test
  public void testCharacter() {
    // Each typed character is appended and advances the cursor by one.
    inputModeScreenPresenter.character('a');
    inputModeScreenPresenter.character('b');
    inputModeScreenPresenter.character('c');

    InOrder inOrder = inOrder(inputModeScreenView);
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("a"), eq(1));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("ab"), eq(2));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("abc"), eq(3));
  }

  @Test
  public void testArrowLeftAndRight() {
    // The cursor moves one column per arrow press and clamps at both ends:
    // the 4th arrowLeft (at column 0) and the 4th arrowRight (at column 3)
    // produce no additional view update.
    inputModeScreenPresenter.character('a');
    inputModeScreenPresenter.character('b');
    inputModeScreenPresenter.character('c');
    inputModeScreenPresenter.arrowLeft();
    inputModeScreenPresenter.arrowLeft();
    inputModeScreenPresenter.arrowLeft();
    inputModeScreenPresenter.arrowLeft();
    inputModeScreenPresenter.arrowRight();
    inputModeScreenPresenter.arrowRight();
    inputModeScreenPresenter.arrowRight();
    inputModeScreenPresenter.arrowRight();

    InOrder inOrder = inOrder(inputModeScreenView);
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("a"), eq(1));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("ab"), eq(2));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("abc"), eq(3));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("abc"), eq(2));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("abc"), eq(1));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("abc"), eq(0));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("abc"), eq(1));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("abc"), eq(2));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("abc"), eq(3));
  }

  @Test
  public void testHomeAndEnd() {
    // home() jumps the cursor to column 0, end() to the end of the line;
    // repeating either at its boundary produces no additional view update.
    inputModeScreenPresenter.character('a');
    inputModeScreenPresenter.character('b');
    inputModeScreenPresenter.character('c');
    inputModeScreenPresenter.home();
    inputModeScreenPresenter.home();
    inputModeScreenPresenter.end();
    inputModeScreenPresenter.end();

    InOrder inOrder = inOrder(inputModeScreenView);
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("a"), eq(1));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("ab"), eq(2));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("abc"), eq(3));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("abc"), eq(0));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("abc"), eq(3));
  }

  @Test
  public void testBackspace() {
    // backspace() removes the character before the cursor; a 4th press on an
    // already-empty line produces no additional view update.
    inputModeScreenPresenter.character('a');
    inputModeScreenPresenter.character('b');
    inputModeScreenPresenter.character('c');
    inputModeScreenPresenter.backspace();
    inputModeScreenPresenter.backspace();
    inputModeScreenPresenter.backspace();
    inputModeScreenPresenter.backspace();

    InOrder inOrder = inOrder(inputModeScreenView);
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("a"), eq(1));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("ab"), eq(2));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("abc"), eq(3));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("ab"), eq(2));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("a"), eq(1));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq(""), eq(0));
  }

  @Test
  public void testDelete() {
    // delete() removes the character under the cursor. With the cursor at the
    // end of "abc" it is a no-op on the text; after arrowLeft it removes 'c'.
    inputModeScreenPresenter.character('a');
    inputModeScreenPresenter.character('b');
    inputModeScreenPresenter.character('c');
    inputModeScreenPresenter.delete();
    inputModeScreenPresenter.arrowLeft();
    inputModeScreenPresenter.delete();

    InOrder inOrder = inOrder(inputModeScreenView);
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("a"), eq(1));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("ab"), eq(2));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("abc"), eq(3));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("abc"), eq(2));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("ab"), eq(2));
  }

  @Test
  public void testHistories() {
    // arrowUp walks back through the history (most recent first: history2,
    // then history1); arrowDown walks forward again. Only three history
    // updates are expected for the six presses, so the extra presses at each
    // boundary evidently produce no additional view update.
    inputModeScreenPresenter.character('a');
    inputModeScreenPresenter.character('b');
    inputModeScreenPresenter.character('c');
    inputModeScreenPresenter.arrowUp();
    inputModeScreenPresenter.arrowUp();
    inputModeScreenPresenter.arrowUp();
    inputModeScreenPresenter.arrowDown();
    inputModeScreenPresenter.arrowDown();
    inputModeScreenPresenter.arrowDown();

    InOrder inOrder = inOrder(inputModeScreenView);
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("a"), eq(1));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("ab"), eq(2));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("abc"), eq(3));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("history2"), eq(8));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("history1"), eq(8));
    inOrder.verify(inputModeScreenView).showInput(any(String.class), eq("history2"), eq(8));
  }

  @Test
  public void testReturnToTopScreen() {
    // On return, the current input line is handed to the result listener and
    // the listener's return value becomes the next screen.
    when(resultListener.apply(any(String.class))).thenReturn(topScreenView);

    inputModeScreenPresenter.character('a');
    inputModeScreenPresenter.character('b');
    inputModeScreenPresenter.character('c');

    assertThat(inputModeScreenPresenter.returnToNextScreen(), is((ScreenView) topScreenView));
    verify(resultListener).apply(eq("abc"));
  }
}

View File

@ -0,0 +1,65 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.top;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.verify;
import org.apache.hadoop.hbase.hbtop.screen.ScreenView;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
/** Test for {@link MessageModeScreenPresenter}. */
@Category(SmallTests.class)
@RunWith(MockitoJUnitRunner.class)
public class TestMessageModeScreenPresenter {

  private static final String TEST_MESSAGE = "test message";

  @Mock
  private MessageModeScreenView messageModeScreenView;

  @Mock
  private TopScreenView topScreenView;

  private MessageModeScreenPresenter messageModeScreenPresenter;

  @Before
  public void setup() {
    messageModeScreenPresenter =
      new MessageModeScreenPresenter(messageModeScreenView, TEST_MESSAGE, topScreenView);
  }

  @Test
  public void testInit() {
    // init() pushes the configured message straight to the view.
    messageModeScreenPresenter.init();
    verify(messageModeScreenView).showMessage(eq(TEST_MESSAGE));
  }

  @Test
  public void testReturnToTopScreen() {
    // Dismissing the message returns control to the top screen.
    assertThat(messageModeScreenPresenter.returnToNextScreen(), is((ScreenView) topScreenView));
  }
}

View File

@ -0,0 +1,293 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.top;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
 * Test for {@link Paging}: cursor movement and the visible page window under
 * arrow/page navigation and dynamic page/records resizing.
 *
 * Every state check goes through {@link #assertPaging(Paging, int, int, int)},
 * which asserts the triple (currentPosition, pageStartPosition, pageEndPosition).
 */
@Category(SmallTests.class)
public class TestPaging {

  @Test
  public void testArrowUpAndArrowDown() {
    // Page of 3 over 5 records: the cursor moves one record at a time and the
    // window slides only when the cursor crosses its edge; both directions
    // clamp at the ends.
    Paging paging = new Paging();
    paging.updatePageSize(3);
    paging.updateRecordsSize(5);

    assertPaging(paging, 0, 0, 3);
    paging.arrowDown();
    assertPaging(paging, 1, 0, 3);
    paging.arrowDown();
    assertPaging(paging, 2, 0, 3);
    paging.arrowDown();
    assertPaging(paging, 3, 1, 4);
    paging.arrowDown();
    assertPaging(paging, 4, 2, 5);
    paging.arrowDown();
    assertPaging(paging, 4, 2, 5);

    paging.arrowUp();
    assertPaging(paging, 3, 2, 5);
    paging.arrowUp();
    assertPaging(paging, 2, 2, 5);
    paging.arrowUp();
    assertPaging(paging, 1, 1, 4);
    paging.arrowUp();
    assertPaging(paging, 0, 0, 3);
    paging.arrowUp();
    assertPaging(paging, 0, 0, 3);
  }

  @Test
  public void testPageUpAndPageDown() {
    // Page of 3 over 8 records: pageDown/pageUp move the cursor a whole page,
    // clamping at the last and first record respectively.
    Paging paging = new Paging();
    paging.updatePageSize(3);
    paging.updateRecordsSize(8);

    assertPaging(paging, 0, 0, 3);
    paging.pageDown();
    assertPaging(paging, 3, 3, 6);
    paging.pageDown();
    assertPaging(paging, 6, 5, 8);
    paging.pageDown();
    assertPaging(paging, 7, 5, 8);
    paging.pageDown();
    assertPaging(paging, 7, 5, 8);

    paging.pageUp();
    assertPaging(paging, 4, 4, 7);
    paging.pageUp();
    assertPaging(paging, 1, 1, 4);
    paging.pageUp();
    assertPaging(paging, 0, 0, 3);
    paging.pageUp();
    assertPaging(paging, 0, 0, 3);
  }

  @Test
  public void testInit() {
    // init() resets cursor and window to the top regardless of prior movement.
    Paging paging = new Paging();
    paging.updatePageSize(3);
    paging.updateRecordsSize(5);
    assertPaging(paging, 0, 0, 3);

    paging.pageDown();
    paging.pageDown();
    paging.pageDown();
    paging.pageDown();

    paging.init();
    assertPaging(paging, 0, 0, 3);
  }

  // NOTE(review): method name keeps the original "Grater" typo ("Greater")
  // to avoid churning the test's public identifier.
  @Test
  public void testWhenPageSizeGraterThanRecordsSize() {
    // When the page is larger than the record count, the window always covers
    // all records and only the cursor moves.
    Paging paging = new Paging();
    paging.updatePageSize(5);
    paging.updateRecordsSize(3);

    assertPaging(paging, 0, 0, 3);
    paging.arrowDown();
    assertPaging(paging, 1, 0, 3);
    paging.arrowDown();
    assertPaging(paging, 2, 0, 3);
    paging.arrowDown();
    assertPaging(paging, 2, 0, 3);

    paging.arrowUp();
    assertPaging(paging, 1, 0, 3);
    paging.arrowUp();
    assertPaging(paging, 0, 0, 3);
    paging.arrowUp();
    assertPaging(paging, 0, 0, 3);

    paging.pageDown();
    assertPaging(paging, 2, 0, 3);
    paging.pageDown();
    assertPaging(paging, 2, 0, 3);

    paging.pageUp();
    assertPaging(paging, 0, 0, 3);
    paging.pageUp();
    assertPaging(paging, 0, 0, 3);
  }

  @Test
  public void testWhenPageSizeIsZero() {
    // With a zero-sized page the window stays empty; arrow keys still move
    // the cursor but page keys are no-ops.
    Paging paging = new Paging();
    paging.updatePageSize(0);
    paging.updateRecordsSize(5);

    assertPaging(paging, 0, 0, 0);
    paging.arrowDown();
    assertPaging(paging, 1, 0, 0);
    paging.arrowUp();
    assertPaging(paging, 0, 0, 0);
    paging.pageDown();
    assertPaging(paging, 0, 0, 0);
    paging.pageUp();
    assertPaging(paging, 0, 0, 0);
  }

  @Test
  public void testWhenRecordsSizeIsZero() {
    // With no records, every navigation key is a no-op.
    Paging paging = new Paging();
    paging.updatePageSize(3);
    paging.updateRecordsSize(0);

    assertPaging(paging, 0, 0, 0);
    paging.arrowDown();
    assertPaging(paging, 0, 0, 0);
    paging.arrowUp();
    assertPaging(paging, 0, 0, 0);
    paging.pageDown();
    assertPaging(paging, 0, 0, 0);
    paging.pageUp();
    assertPaging(paging, 0, 0, 0);
  }

  @Test
  public void testWhenChangingPageSizeDynamically() {
    // Shrinking or growing the page keeps the cursor on the same record and
    // re-derives the window around it.
    Paging paging = new Paging();
    paging.updatePageSize(3);
    paging.updateRecordsSize(5);

    assertPaging(paging, 0, 0, 3);
    paging.arrowDown();
    assertPaging(paging, 1, 0, 3);

    paging.updatePageSize(2);
    assertPaging(paging, 1, 0, 2);
    paging.arrowDown();
    assertPaging(paging, 2, 1, 3);
    paging.arrowDown();
    assertPaging(paging, 3, 2, 4);

    paging.updatePageSize(4);
    assertPaging(paging, 3, 1, 5);
    paging.updatePageSize(5);
    assertPaging(paging, 3, 0, 5);

    // A zero-sized page empties the window but preserves the cursor.
    paging.updatePageSize(0);
    assertPaging(paging, 3, 0, 0);
    paging.arrowDown();
    assertPaging(paging, 4, 0, 0);
    paging.arrowUp();
    assertPaging(paging, 3, 0, 0);
    paging.pageDown();
    assertPaging(paging, 3, 0, 0);
    paging.pageUp();
    assertPaging(paging, 3, 0, 0);

    paging.updatePageSize(1);
    assertPaging(paging, 3, 3, 4);
  }

  @Test
  public void testWhenChangingRecordsSizeDynamically() {
    // Shrinking the record count clamps the window (and, once the cursor
    // would fall off the end, the cursor) to the new size.
    Paging paging = new Paging();
    paging.updatePageSize(3);
    paging.updateRecordsSize(5);
    assertPaging(paging, 0, 0, 3);

    paging.updateRecordsSize(2);
    assertPaging(paging, 0, 0, 2);
    // (Removed three assertThat() lines that duplicated the assertPaging()
    // call above verbatim.)
    paging.arrowDown();
    assertPaging(paging, 1, 0, 2);

    paging.updateRecordsSize(3);
    assertPaging(paging, 1, 0, 3);
    paging.arrowDown();
    assertPaging(paging, 2, 0, 3);

    paging.updateRecordsSize(1);
    assertPaging(paging, 0, 0, 1);
    paging.updateRecordsSize(0);
    assertPaging(paging, 0, 0, 0);
    paging.arrowDown();
    assertPaging(paging, 0, 0, 0);
    paging.arrowUp();
    assertPaging(paging, 0, 0, 0);
    paging.pageDown();
    assertPaging(paging, 0, 0, 0);
    paging.pageUp();
    assertPaging(paging, 0, 0, 0);
  }

  /** Asserts cursor position and visible window bounds of {@code paging}. */
  private void assertPaging(Paging paging, int currentPosition, int pageStartPosition,
    int pageEndPosition) {
    assertThat(paging.getCurrentPosition(), is(currentPosition));
    assertThat(paging.getPageStartPosition(), is(pageStartPosition));
    assertThat(paging.getPageEndPosition(), is(pageEndPosition));
  }
}

View File

@ -0,0 +1,200 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.top;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.hbtop.Record;
import org.apache.hadoop.hbase.hbtop.RecordFilter;
import org.apache.hadoop.hbase.hbtop.TestUtils;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.hbtop.field.FieldInfo;
import org.apache.hadoop.hbase.hbtop.field.FieldValue;
import org.apache.hadoop.hbase.hbtop.mode.Mode;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
/**
 * Test for {@link TopScreenModel}: summary, per-mode records, sorting,
 * filtering, filter history, mode switching, and drill-down.
 */
@Category(SmallTests.class)
@RunWith(MockitoJUnitRunner.class)
public class TestTopScreenModel {

  @Mock
  private Admin admin;

  private TopScreenModel topScreenModel;

  // All fields of REGION mode; used when parsing filters and picking sort keys.
  private List<Field> fields;

  @Before
  public void setup() throws IOException {
    // The model reads all metrics via Admin#getClusterStatus(); back it with
    // the shared dummy cluster fixture from TestUtils.
    when(admin.getClusterStatus()).thenReturn(TestUtils.createDummyClusterStatus());
    topScreenModel = new TopScreenModel(admin, Mode.REGION);

    fields = new ArrayList<>();
    for (FieldInfo fieldInfo : Mode.REGION.getFieldInfos()) {
      fields.add(fieldInfo.getField());
    }
  }

  @Test
  public void testSummary() {
    topScreenModel.refreshMetricsData();
    Summary summary = topScreenModel.getSummary();
    TestUtils.assertSummary(summary);
  }

  @Test
  public void testRecords() {
    // Region Mode
    topScreenModel.refreshMetricsData();
    TestUtils.assertRecordsInRegionMode(topScreenModel.getRecords());

    // Namespace Mode
    topScreenModel.switchMode(Mode.NAMESPACE, null, false);
    topScreenModel.refreshMetricsData();
    TestUtils.assertRecordsInNamespaceMode(topScreenModel.getRecords());

    // Table Mode
    topScreenModel.switchMode(Mode.TABLE, null, false);
    topScreenModel.refreshMetricsData();
    TestUtils.assertRecordsInTableMode(topScreenModel.getRecords());

    // RegionServer Mode (original comment incorrectly said "Namespace Mode")
    topScreenModel.switchMode(Mode.REGION_SERVER, null, false);
    topScreenModel.refreshMetricsData();
    TestUtils.assertRecordsInRegionServerMode(topScreenModel.getRecords());
  }

  @Test
  public void testSort() {
    // The sort key is LOCALITY
    topScreenModel.setSortFieldAndFields(Field.LOCALITY, fields);
    FieldValue previous = null;

    // Default order: each record's LOCALITY is strictly smaller than its
    // predecessor's, i.e. descending. (The original comment said "ascending",
    // which contradicts the `< 0` assertion below.)
    topScreenModel.refreshMetricsData();
    for (Record record : topScreenModel.getRecords()) {
      FieldValue current = record.get(Field.LOCALITY);
      if (previous != null) {
        assertTrue(current.compareTo(previous) < 0);
      }
      previous = current;
    }

    // After toggling the sort order, values strictly increase (ascending).
    topScreenModel.switchSortOrder();
    topScreenModel.refreshMetricsData();
    previous = null;
    for (Record record : topScreenModel.getRecords()) {
      FieldValue current = record.get(Field.LOCALITY);
      if (previous != null) {
        assertTrue(current.compareTo(previous) > 0);
      }
      previous = current;
    }
  }

  @Test
  public void testFilters() {
    // Case-sensitive filter matches only exact-case table names.
    topScreenModel.addFilter("TABLE==table1", false);
    topScreenModel.refreshMetricsData();
    for (Record record : topScreenModel.getRecords()) {
      FieldValue value = record.get(Field.TABLE);
      assertThat(value.asString(), is("table1"));
    }

    // Wrong case with a case-sensitive filter matches nothing.
    topScreenModel.clearFilters();
    topScreenModel.addFilter("TABLE==TABLE1", false);
    topScreenModel.refreshMetricsData();
    assertThat(topScreenModel.getRecords().size(), is(0));

    // Test for ignore case
    topScreenModel.clearFilters();
    topScreenModel.addFilter("TABLE==TABLE1", true);
    topScreenModel.refreshMetricsData();
    for (Record record : topScreenModel.getRecords()) {
      FieldValue value = record.get(Field.TABLE);
      assertThat(value.asString(), is("table1"));
    }
  }

  @Test
  public void testFilterHistories() {
    // Filter strings are recorded in the order they were added.
    topScreenModel.addFilter("TABLE==table1", false);
    topScreenModel.addFilter("TABLE==table2", false);
    topScreenModel.addFilter("TABLE==table3", false);

    assertThat(topScreenModel.getFilterHistories().get(0), is("TABLE==table1"));
    assertThat(topScreenModel.getFilterHistories().get(1), is("TABLE==table2"));
    assertThat(topScreenModel.getFilterHistories().get(2), is("TABLE==table3"));
  }

  @Test
  public void testSwitchMode() {
    topScreenModel.switchMode(Mode.TABLE, null, false);
    assertThat(topScreenModel.getCurrentMode(), is(Mode.TABLE));

    // Test for initialFilters
    List<RecordFilter> initialFilters = Arrays.asList(
      RecordFilter.parse("TABLE==table1", fields, true),
      RecordFilter.parse("TABLE==table2", fields, true));

    topScreenModel.switchMode(Mode.TABLE, initialFilters, false);

    assertThat(topScreenModel.getFilters().size(), is(initialFilters.size()));
    for (int i = 0; i < topScreenModel.getFilters().size(); i++) {
      assertThat(topScreenModel.getFilters().get(i).toString(),
        is(initialFilters.get(i).toString()));
    }

    // Test when keepSortFieldAndSortOrderIfPossible is true
    topScreenModel.setSortFieldAndFields(Field.NAMESPACE, fields);
    topScreenModel.switchMode(Mode.NAMESPACE, null, true);
    assertThat(topScreenModel.getCurrentSortField(), is(Field.NAMESPACE));
  }

  @Test
  public void testDrillDown() {
    // Drilling down from a TABLE-mode record must add NAMESPACE and TABLE
    // filters for that record and keep the current sort field.
    topScreenModel.switchMode(Mode.TABLE, null, false);
    topScreenModel.setSortFieldAndFields(Field.NAMESPACE, fields);
    topScreenModel.refreshMetricsData();

    boolean success = topScreenModel.drillDown(topScreenModel.getRecords().get(0));

    assertThat(success, is(true));
    assertThat(topScreenModel.getFilters().get(0).toString(), is("NAMESPACE==namespace"));
    assertThat(topScreenModel.getFilters().get(1).toString(), is("TABLE==table3"));
    assertThat(topScreenModel.getCurrentSortField(), is(Field.NAMESPACE));
  }
}

View File

@ -0,0 +1,291 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.screen.top;
import static org.apache.hadoop.hbase.hbtop.Record.entry;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.argThat;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.hbtop.Record;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.hbtop.field.FieldInfo;
import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.mockito.ArgumentMatcher;
import org.mockito.InOrder;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
@Category(SmallTests.class)
@RunWith(MockitoJUnitRunner.class)
public class TestTopScreenPresenter {
// Three visible fields, each 10 characters wide.
private static final List<FieldInfo> TEST_FIELD_INFOS = Arrays.asList(
  new FieldInfo(Field.REGION, 10, true),
  new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true),
  new FieldInfo(Field.LOCALITY, 10, true)
);

// Three records so the vertical-scrolling test can move across a small list.
private static final List<Record> TEST_RECORDS = Arrays.asList(
  Record.ofEntries(
    entry(Field.REGION, "region1"),
    entry(Field.REQUEST_COUNT_PER_SECOND, 1L),
    entry(Field.LOCALITY, 0.3f)),
  Record.ofEntries(
    entry(Field.REGION, "region2"),
    entry(Field.REQUEST_COUNT_PER_SECOND, 2L),
    entry(Field.LOCALITY, 0.2f)),
  Record.ofEntries(
    entry(Field.REGION, "region3"),
    entry(Field.REQUEST_COUNT_PER_SECOND, 3L),
    entry(Field.LOCALITY, 0.1f))
);

// Fixed summary fixture returned by the mocked model; field meanings follow
// the Summary constructor — not visible here, confirm against Summary.
private static final Summary TEST_SUMMARY = new Summary(
  "00:00:01", "3.0.0-SNAPSHOT", "01234567-89ab-cdef-0123-456789abcdef",
  3, 2, 1, 6, 1, 3.0, 300);

@Mock
private TopScreenView topScreenView;

@Mock
private TopScreenModel topScreenModel;

// Presenter under test; created in setup().
private TopScreenPresenter topScreenPresenter;
@Before
public void setup() {
  // Terminal and page are large enough that all three records fit on one page.
  when(topScreenView.getTerminalSize()).thenReturn(new TerminalSize(100, 100));
  when(topScreenView.getPageSize()).thenReturn(100);
  when(topScreenModel.getFieldInfos()).thenReturn(TEST_FIELD_INFOS);

  List<Field> fields = new ArrayList<>();
  for (FieldInfo fieldInfo : TEST_FIELD_INFOS) {
    fields.add(fieldInfo.getField());
  }
  when(topScreenModel.getFields()).thenReturn(fields);

  when(topScreenModel.getRecords()).thenReturn(TEST_RECORDS);
  when(topScreenModel.getSummary()).thenReturn(TEST_SUMMARY);

  // 3000 is the refresh delay — presumably milliseconds; TODO confirm
  // against TopScreenPresenter.
  topScreenPresenter = new TopScreenPresenter(topScreenView, 3000, topScreenModel);
}
@Test
public void testRefresh() {
topScreenPresenter.init();
topScreenPresenter.refresh(true);
verify(topScreenView).showTopScreen(
argThat(new ArgumentMatcher<Summary>() {
@Override
public boolean matches(Object argument) {
return assertSummary((Summary) argument);
}
}), argThat(new ArgumentMatcher<List<Header>>() {
@Override
@SuppressWarnings("unchecked")
public boolean matches(Object argument) {
return assertHeaders((List<Header>) argument);
}
}), argThat(new ArgumentMatcher<List<Record>>() {
@Override
@SuppressWarnings("unchecked")
public boolean matches(Object argument) {
return assertRecords((List<Record>) argument);
}
}), argThat(new ArgumentMatcher<Record>() {
@Override
public boolean matches(Object argument) {
return assertSelectedRecord((Record) argument, 0);
}
}));
}
@Test
public void testVerticalScrolling() {
topScreenPresenter.init();
topScreenPresenter.refresh(true);
topScreenPresenter.arrowDown();
topScreenPresenter.arrowDown();
topScreenPresenter.arrowDown();
topScreenPresenter.arrowDown();
topScreenPresenter.arrowDown();
topScreenPresenter.arrowDown();
topScreenPresenter.arrowUp();
topScreenPresenter.arrowUp();
topScreenPresenter.arrowUp();
topScreenPresenter.pageDown();
topScreenPresenter.pageDown();
topScreenPresenter.pageUp();
topScreenPresenter.pageUp();
InOrder inOrder = inOrder(topScreenView);
verifyVerticalScrolling(inOrder, 0);
verifyVerticalScrolling(inOrder, 1);
verifyVerticalScrolling(inOrder, 2);
verifyVerticalScrolling(inOrder, 2);
verifyVerticalScrolling(inOrder, 1);
verifyVerticalScrolling(inOrder, 0);
verifyVerticalScrolling(inOrder, 0);
verifyVerticalScrolling(inOrder, 2);
verifyVerticalScrolling(inOrder, 2);
verifyVerticalScrolling(inOrder, 0);
verifyVerticalScrolling(inOrder, 0);
}
@SuppressWarnings("unchecked")
private void verifyVerticalScrolling(InOrder inOrder, final int expectedSelectedRecodeIndex) {
inOrder.verify(topScreenView).showTopScreen(any(Summary.class), any(List.class),
any(List.class), argThat(new ArgumentMatcher<Record>() {
@Override
public boolean matches(Object argument) {
return assertSelectedRecord((Record) argument, expectedSelectedRecodeIndex);
}
})
);
}
@Test
public void testHorizontalScrolling() {
topScreenPresenter.init();
topScreenPresenter.refresh(true);
topScreenPresenter.arrowRight();
topScreenPresenter.arrowRight();
topScreenPresenter.arrowRight();
topScreenPresenter.arrowLeft();
topScreenPresenter.arrowLeft();
topScreenPresenter.arrowLeft();
topScreenPresenter.end();
topScreenPresenter.end();
topScreenPresenter.home();
topScreenPresenter.home();
InOrder inOrder = inOrder(topScreenView);
verifyHorizontalScrolling(inOrder, 3);
verifyHorizontalScrolling(inOrder, 2);
verifyHorizontalScrolling(inOrder, 1);
verifyHorizontalScrolling(inOrder, 1);
verifyHorizontalScrolling(inOrder, 2);
verifyHorizontalScrolling(inOrder, 3);
verifyHorizontalScrolling(inOrder, 3);
verifyHorizontalScrolling(inOrder, 1);
verifyHorizontalScrolling(inOrder, 1);
verifyHorizontalScrolling(inOrder, 3);
verifyHorizontalScrolling(inOrder, 3);
}
@SuppressWarnings("unchecked")
private void verifyHorizontalScrolling(InOrder inOrder, final int expectedHeaderCount) {
inOrder.verify(topScreenView).showTopScreen(any(Summary.class),
argThat(new ArgumentMatcher<List<Header>>() {
@Override
public boolean matches(Object argument) {
List<Header> headers = (List<Header>) argument;
return headers.size() == expectedHeaderCount;
}
}), any(List.class), any(Record.class));
}
private boolean assertSummary(Summary actual) {
return actual.getCurrentTime().equals(TEST_SUMMARY.getCurrentTime())
&& actual.getVersion().equals(TEST_SUMMARY.getVersion())
&& actual.getClusterId().equals(TEST_SUMMARY.getClusterId())
&& actual.getServers() == TEST_SUMMARY.getServers()
&& actual.getLiveServers() == TEST_SUMMARY.getLiveServers()
&& actual.getDeadServers() == TEST_SUMMARY.getDeadServers()
&& actual.getRegionCount() == TEST_SUMMARY.getRegionCount()
&& actual.getRitCount() == TEST_SUMMARY.getRitCount()
&& actual.getAverageLoad() == TEST_SUMMARY.getAverageLoad()
&& actual.getAggregateRequestPerSecond() == TEST_SUMMARY.getAggregateRequestPerSecond();
}
private boolean assertHeaders(List<Header> actual) {
List<Header> expected = new ArrayList<>();
for (FieldInfo fieldInfo : TEST_FIELD_INFOS) {
expected.add(new Header(fieldInfo.getField(), fieldInfo.getDefaultLength()));
}
if (actual.size() != expected.size()) {
return false;
}
for (int i = 0; i < actual.size(); i++) {
if (actual.get(i).getField() != expected.get(i).getField()) {
return false;
}
if (actual.get(i).getLength() != expected.get(i).getLength()) {
return false;
}
}
return true;
}
private boolean assertRecords(List<Record> actual) {
if (actual.size() != TEST_RECORDS.size()) {
return false;
}
for (int i = 0; i < actual.size(); i++) {
if (!assertRecord(actual.get(i), TEST_RECORDS.get(i))) {
return false;
}
}
return true;
}
private boolean assertSelectedRecord(Record actual, int expectedSelectedRecodeIndex) {
return assertRecord(actual, TEST_RECORDS.get(expectedSelectedRecodeIndex));
}
private boolean assertRecord(Record actual, Record expected) {
return actual.get(Field.REGION).equals(expected.get(Field.REGION)) && actual
.get(Field.REQUEST_COUNT_PER_SECOND).equals(expected.get(Field.REQUEST_COUNT_PER_SECOND))
&& actual.get(Field.LOCALITY).equals(expected.get(Field.LOCALITY));
}
}

View File

@ -0,0 +1,77 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.terminal.impl;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.hbtop.terminal.KeyPress;
import org.apache.hadoop.hbase.hbtop.terminal.Terminal;
/**
 * Manual test program for cursor handling in the terminal implementation.
 * Run it from a real terminal: it prints a few strings, walks the cursor
 * around with one-second pauses, then waits for any key press to finish.
 * Not a JUnit test — it needs an interactive terminal.
 */
public final class TestCursor {

  private TestCursor() {
  }

  public static void main(String[] args) throws Exception {
    try (Terminal terminal = new TerminalImpl()) {
      terminal.refresh();
      terminal.setCursorPosition(0, 0);

      terminal.getTerminalPrinter(0).print("aaa").endOfLine();
      terminal.refresh();
      pause();

      terminal.getTerminalPrinter(0).print("bbb").endOfLine();
      terminal.refresh();
      pause();

      // Walk the cursor along the first row, one column per second.
      for (int column = 1; column <= 3; column++) {
        terminal.setCursorPosition(column, 0);
        terminal.refresh();
        pause();
      }

      terminal.setCursorPosition(0, 1);
      terminal.refresh();
      pause();

      terminal.getTerminalPrinter(1).print("ccc").endOfLine();
      terminal.refresh();
      pause();

      terminal.getTerminalPrinter(3).print("Press any key to finish").endOfLine();
      terminal.refresh();
      waitForAnyKey(terminal);
    }
  }

  // One-second delay between the visible steps of the demo.
  private static void pause() throws InterruptedException {
    TimeUnit.SECONDS.sleep(1);
  }

  // Blocks until the user presses any key, polling every 100ms.
  private static void waitForAnyKey(Terminal terminal) throws InterruptedException {
    KeyPress keyPress = terminal.pollKeyPress();
    while (keyPress == null) {
      TimeUnit.MILLISECONDS.sleep(100);
      keyPress = terminal.pollKeyPress();
    }
  }
}

View File

@ -0,0 +1,52 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.terminal.impl;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.hbtop.terminal.KeyPress;
import org.apache.hadoop.hbase.hbtop.terminal.Terminal;
/**
 * Manual test program for key-press decoding in the terminal implementation.
 * Run it from a real terminal: every key press is echoed to the first line;
 * pressing F12 exits. Not a JUnit test — it needs an interactive terminal.
 */
public final class TestKeyPress {

  private TestKeyPress() {
  }

  public static void main(String[] args) throws Exception {
    try (Terminal terminal = new TerminalImpl()) {
      terminal.hideCursor();
      terminal.refresh();

      boolean done = false;
      while (!done) {
        KeyPress keyPress = terminal.pollKeyPress();
        if (keyPress == null) {
          // Nothing pending; poll again in 100ms.
          TimeUnit.MILLISECONDS.sleep(100);
        } else {
          // Echo the decoded key press and stop once F12 is seen.
          terminal.getTerminalPrinter(0).print(keyPress.toString()).endOfLine();
          terminal.refresh();
          done = keyPress.getType() == KeyPress.Type.F12;
        }
      }
    }
  }
}

View File

@ -0,0 +1,58 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.hbtop.terminal.impl;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.hbtop.terminal.KeyPress;
import org.apache.hadoop.hbase.hbtop.terminal.Terminal;
import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter;
/**
 * Manual test program for the terminal printer's text attributes.
 * Run it from a real terminal: it prints normal, highlighted, bold, and
 * highlighted-bold strings, then waits for any key press to finish.
 * Not a JUnit test — it needs an interactive terminal.
 */
public final class TestTerminalPrinter {

  private TestTerminalPrinter() {
  }

  public static void main(String[] args) throws Exception {
    try (Terminal terminal = new TerminalImpl()) {
      terminal.hideCursor();
      terminal.refresh();

      // Exercise each attribute combination on successive lines.
      TerminalPrinter printer = terminal.getTerminalPrinter(0);
      printer.print("Normal string").endOfLine();
      printer.startHighlight().print("Highlighted string").stopHighlight().endOfLine();
      printer.startBold().print("Bold string").stopBold().endOfLine();
      printer.startHighlight().startBold().print("Highlighted bold string")
        .stopBold().stopHighlight().endOfLine();
      printer.endOfLine();
      printer.print("Press any key to finish").endOfLine();
      terminal.refresh();

      awaitKeyPress(terminal);
    }
  }

  // Blocks until the user presses any key, polling every 100ms.
  private static void awaitKeyPress(Terminal terminal) throws InterruptedException {
    while (terminal.pollKeyPress() == null) {
      TimeUnit.MILLISECONDS.sleep(100);
    }
  }
}

View File

@ -83,6 +83,7 @@
<module>hbase-archetypes</module>
<module>hbase-metrics-api</module>
<module>hbase-metrics</module>
<module>hbase-hbtop</module>
</modules>
<!--Add apache snapshots in case we want to use unreleased versions of plugins:
e.g. surefire 2.18-SNAPSHOT-->
@ -1267,6 +1268,7 @@
<commons-codec.version>1.9</commons-codec.version>
<!-- pretty outdated -->
<commons-io.version>2.4</commons-io.version>
<commons-lang3.version>3.8.1</commons-lang3.version>
<commons-lang.version>2.6</commons-lang.version>
<commons-logging.version>1.2</commons-logging.version>
<commons-math.version>2.2</commons-math.version>
@ -1659,6 +1661,11 @@
<artifactId>commons-lang</artifactId>
<version>${commons-lang.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>${commons-lang3.version}</version>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>