HBASE-18621 Refactor ClusterOptions before applying to code base
Signed-off-by: Chia-Ping Tsai <chia7712@gmail.com>
commit 77ca743d09 (parent af21572427)
@@ -29,6 +29,8 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.io.VersionedWritable;
 
+import com.google.common.base.Objects;
+
 /**
  * Status information on the HBase cluster.
@@ -47,26 +49,23 @@ import org.apache.hadoop.io.VersionedWritable;
  * <li>Regions in transition at master</li>
  * <li>The unique cluster ID</li>
  * </ul>
- * <tt>{@link Options}</tt> provides a way to filter out infos which unwanted.
- * The following codes will retrieve all the cluster information.
+ * <tt>{@link Option}</tt> provides a way to get desired ClusterStatus information.
+ * The following codes will get all the cluster information.
  * <pre>
  * {@code
  * // Original version still works
 * Admin admin = connection.getAdmin();
 * ClusterStatus status = admin.getClusterStatus();
 * // or below, a new version which has the same effects
- * ClusterStatus status = admin.getClusterStatus(Options.defaultOptions());
+ * ClusterStatus status = admin.getClusterStatus(EnumSet.allOf(Option.class));
 * }
 * </pre>
- * If information about dead servers and master coprocessors are unwanted,
+ * If information about live servers is the only wanted.
 * then codes in the following way:
 * <pre>
 * {@code
 * Admin admin = connection.getAdmin();
- * ClusterStatus status = admin.getClusterStatus(
- *     Options.defaultOptions()
- *       .excludeDeadServers()
- *       .excludeMasterCoprocessors());
+ * ClusterStatus status = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
 * }
 * </pre>
 */
@@ -208,23 +207,23 @@ public class ClusterStatus extends VersionedWritable {
     if (!(o instanceof ClusterStatus)) {
       return false;
     }
-    return (getVersion() == ((ClusterStatus)o).getVersion()) &&
-      getHBaseVersion().equals(((ClusterStatus)o).getHBaseVersion()) &&
-      this.liveServers.equals(((ClusterStatus)o).liveServers) &&
-      this.deadServers.containsAll(((ClusterStatus)o).deadServers) &&
-      Arrays.equals(this.masterCoprocessors,
-                    ((ClusterStatus)o).masterCoprocessors) &&
-      this.master.equals(((ClusterStatus)o).master) &&
-      this.backupMasters.containsAll(((ClusterStatus)o).backupMasters);
+    ClusterStatus other = (ClusterStatus) o;
+    //TODO Override the equals() methods in ServerLoad.
+    return (getVersion() == other.getVersion()) &&
+      Objects.equal(getHBaseVersion(), other.getHBaseVersion()) &&
+      Objects.equal(this.liveServers, other.liveServers) &&
+      getDeadServerNames().containsAll(other.getDeadServerNames()) &&
+      Arrays.equals(getMasterCoprocessors(), other.getMasterCoprocessors()) &&
+      Objects.equal(getMaster(), other.getMaster()) &&
+      getBackupMasters().containsAll(other.getBackupMasters());
   }
 
   /**
    * @see java.lang.Object#hashCode()
    */
   public int hashCode() {
-    return VERSION + hbaseVersion.hashCode() + this.liveServers.hashCode() +
-      this.deadServers.hashCode() + this.master.hashCode() +
-      this.backupMasters.hashCode();
+    return VERSION + Objects.hashCode(hbaseVersion, liveServers, deadServers,
+      master, backupMasters);
   }
 
   /** @return the object version number */
@@ -436,194 +435,17 @@ public class ClusterStatus extends VersionedWritable {
   }
 
   /**
-   * Options provides a way to filter out unwanted information.
-   * For compatibility, default options includes all the information about a ClusterStatus.
-   * To filter out unwanted information, use the specific excludeXXX() method.
+   * Kinds of ClusterStatus
    */
-  public static class Options {
-    private boolean includeHBaseVersion = true;
-    private boolean includeLiveServers = true;
-    private boolean includeDeadServers = true;
-    private boolean includeMaster = true;
-    private boolean includeBackupMasters = true;
-    private boolean includeRegionState = true;
-    private boolean includeClusterId = true;
-    private boolean includeMasterCoprocessors = true;
-    private boolean includeBalancerOn = true;
-
-    private Options() {}
-
-    /**
-     * Include all information about a ClusterStatus.
-     */
-    public static Options getDefaultOptions() {
-      return new Options();
-    }
-
-    /**
-     * Filter out hbase verision.
-     */
-    public Options excludeHBaseVersion() {
-      includeHBaseVersion = false;
-      return this;
-    }
-
-    /**
-     * Filter out live servers.
-     */
-    public Options excludeLiveServers() {
-      includeLiveServers = false;
-      return this;
-    }
-
-    /**
-     * Filter out dead servers info.
-     */
-    public Options excludeDeadServers() {
-      includeDeadServers = false;
-      return this;
-    }
-
-    /**
-     * Filter out master info.
-     */
-    public Options excludeMaster() {
-      includeMaster = false;
-      return this;
-    }
-
-    /**
-     * Filter out backup masters info.
-     */
-    public Options excludeBackupMasters() {
-      includeBackupMasters = false;
-      return this;
-    }
-
-    /**
-     * Filter out region state.
-     */
-    public Options excludeRegionState() {
-      includeRegionState = false;
-      return this;
-    }
-
-    /**
-     * Filter out cluster id.
-     */
-    public Options excludeClusterId() {
-      includeClusterId = false;
-      return this;
-    }
-
-    /**
-     * Filter out master's coprocessors info.
-     */
-    public Options excludeMasterCoprocessors() {
-      includeMasterCoprocessors = false;
-      return this;
-    }
-
-    /**
-     * Filter out balancer on info.
-     */
-    public Options excludeBalancerOn() {
-      includeBalancerOn = false;
-      return this;
-    }
-
-    /**
-     * Include hbase version info.
-     */
-    public boolean includeHBaseVersion() {
-      return includeHBaseVersion;
-    }
-
-    /**
-     * Include live servers info.
-     */
-    public boolean includeLiveServers() {
-      return includeLiveServers;
-    }
-
-    /**
-     * Include dead servers info.
-     */
-    public boolean includeDeadServers() {
-      return includeDeadServers;
-    }
-
-    /**
-     * Include master info.
-     */
-    public boolean includeMaster() {
-      return includeMaster;
-    }
-
-    /**
-     * Include backup masters info.
-     */
-    public boolean includeBackupMasters() {
-      return includeBackupMasters;
-    }
-
-    /**
-     * Include region states info.
-     */
-    public boolean includeRegionState() {
-      return includeRegionState;
-    }
-
-    /**
-     * Include cluster id info.
-     */
-    public boolean includeClusterId() {
-      return includeClusterId;
-    }
-
-    /**
-     * Include master's coprocessors.
-     */
-    public boolean includeMasterCoprocessors() {
-      return includeMasterCoprocessors;
-    }
-
-    /**
-     * Include balancer on info.
-     */
-    public boolean includeBalancerOn() {
-      return includeBalancerOn;
-    }
-
-    /**
-     * For an options reusable convenience, reset options to default.
-     */
-    public Options reset() {
-      includeHBaseVersion = true;
-      includeLiveServers = true;
-      includeDeadServers = true;
-      includeMaster = true;
-      includeBackupMasters = true;
-      includeRegionState = true;
-      includeClusterId = true;
-      includeMasterCoprocessors = true;
-      includeBalancerOn = true;
-      return this;
-    }
-
-    @Override
-    public String toString() {
-      StringBuilder builder = new StringBuilder("ClusterStatus info: [");
-      builder.append("include hbase version: " + includeHBaseVersion + ", ");
-      builder.append("include cluster id: " + includeClusterId + ", ");
-      builder.append("include master info: " + includeMaster + ", ");
-      builder.append("include backup masters info: " + includeBackupMasters + ", ");
-      builder.append("include live servers info: " + includeLiveServers + ", ");
-      builder.append("include dead servers info: " + includeDeadServers + ", ");
-      builder.append("include masters coprocessors: " + includeMasterCoprocessors + ", ");
-      builder.append("include region state: " + includeRegionState + ", ");
-      builder.append("include balancer on: " + includeBalancerOn + "]");
-      return builder.toString();
-    }
+  public enum Option {
+    HBASE_VERSION, /** status about hbase version */
+    CLUSTER_ID, /** status about cluster id */
+    BALANCER_ON, /** status about balancer is on or not */
+    LIVE_SERVERS, /** status about live region servers */
+    DEAD_SERVERS, /** status about dead region servers */
+    MASTER, /** status about master */
+    BACKUP_MASTERS, /** status about backup masters */
+    MASTER_COPROCESSORS, /** status about master coprocessors */
+    REGIONS_IN_TRANSITION; /** status about regions in transition */
   }
 }
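Note: the EnumSet-based API above leans entirely on standard java.util.EnumSet factories. A minimal sketch of the idioms that take over from Options.getDefaultOptions() and the excludeXXX() chains (the complementOf variant is an extra illustration, not something this patch uses):

import java.util.EnumSet;
import org.apache.hadoop.hbase.ClusterStatus.Option;

public class OptionSets {
  public static void main(String[] args) {
    EnumSet<Option> everything = EnumSet.allOf(Option.class);   // replaces Options.getDefaultOptions()
    EnumSet<Option> nothing = EnumSet.noneOf(Option.class);     // request no status fields at all
    EnumSet<Option> servers = EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS);
    // "everything except X" becomes one call instead of a chain of excludeXXX() methods:
    EnumSet<Option> noRit = EnumSet.complementOf(EnumSet.of(Option.REGIONS_IN_TRANSITION));
    System.out.println(everything + " " + nothing + " " + servers + " " + noRit);
  }
}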
@@ -22,6 +22,7 @@ import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -31,7 +32,7 @@ import java.util.regex.Pattern;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.ClusterStatus.Options;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
@@ -1231,17 +1232,27 @@ public interface Admin extends Abortable, Closeable {
   void stopRegionServer(final String hostnamePort) throws IOException;
 
   /**
+   * Get whole cluster status, containing status about:
+   * <pre>
+   * hbase version
+   * cluster id
+   * primary/backup master(s)
+   * master's coprocessors
+   * live/dead regionservers
+   * balancer
+   * regions in transition
+   * </pre>
    * @return cluster status
    * @throws IOException if a remote or network exception occurs
    */
   ClusterStatus getClusterStatus() throws IOException;
 
   /**
-   * Get cluster status with options to filter out unwanted status.
+   * Get cluster status with a set of {@link Option} to get desired status.
    * @return cluster status
    * @throws IOException if a remote or network exception occurs
    */
-  ClusterStatus getClusterStatus(Options options) throws IOException;
+  ClusterStatus getClusterStatus(EnumSet<Option> options) throws IOException;
 
   /**
    * Get {@link RegionLoad} of all regions hosted on a regionserver.
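Note: to make the new Admin signature concrete, a minimal usage sketch assembled only from methods visible in this diff; the configuration and connection setup around it are assumed and not part of the patch:

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.ClusterStatus.Option;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterStatusExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Only server information is fetched; unrequested fields come back null or empty.
      ClusterStatus status =
          admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS));
      for (ServerName live : status.getServers()) {
        System.out.println("live: " + live);
      }
      for (ServerName dead : status.getDeadServerNames()) {
        System.out.println("dead: " + dead);
      }
    }
  }
}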
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client;
 import java.util.List;
 import java.util.Collection;
+import java.util.EnumSet;
 import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
@@ -27,7 +28,7 @@ import java.util.function.Function;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.ClusterStatus.Options;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerName;
@@ -838,7 +839,7 @@ public interface AsyncAdmin {
   /**
    * @return cluster status wrapped by {@link CompletableFuture}
    */
-  CompletableFuture<ClusterStatus> getClusterStatus(Options options);
+  CompletableFuture<ClusterStatus> getClusterStatus(EnumSet<Option> options);
 
   /**
    * @return current master server name wrapped by {@link CompletableFuture}
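Note: the async counterpart mirrors the blocking call. A small sketch assuming an already-constructed AsyncAdmin (how it is obtained is outside this diff); the future completes with only the requested fields populated:

import java.util.EnumSet;
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.ClusterStatus.Option;
import org.apache.hadoop.hbase.client.AsyncAdmin;

public class AsyncStatusExample {
  static CompletableFuture<Void> printMasters(AsyncAdmin asyncAdmin) {
    // Request only master-related status, then print it when the call completes.
    return asyncAdmin.getClusterStatus(EnumSet.of(Option.MASTER, Option.BACKUP_MASTERS))
        .thenAccept(status -> {
          System.out.println("active master: " + status.getMaster());
          System.out.println("backup masters: " + status.getBackupMasters());
        });
  }
}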
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.client;
 
 import java.util.Collection;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -26,17 +27,11 @@ import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutorService;
 import java.util.function.Function;
 import java.util.regex.Pattern;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-
-import org.apache.hadoop.hbase.shaded.io.netty.util.Timeout;
-import org.apache.hadoop.hbase.shaded.io.netty.util.TimerTask;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.ClusterStatus.Options;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerName;
@@ -493,11 +488,11 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
 
   @Override
   public CompletableFuture<ClusterStatus> getClusterStatus() {
-    return getClusterStatus(Options.getDefaultOptions());
+    return getClusterStatus(EnumSet.allOf(Option.class));
   }
 
   @Override
-  public CompletableFuture<ClusterStatus> getClusterStatus(Options options) {
+  public CompletableFuture<ClusterStatus> getClusterStatus(EnumSet<Option> options) {
     return wrap(rawAdmin.getClusterStatus(options));
   }
 
@@ -24,6 +24,7 @@ import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedList;
@@ -45,7 +46,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.ClusterStatus.Options;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.CompoundConfiguration;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -2038,11 +2039,11 @@ public class HBaseAdmin implements Admin {
 
   @Override
   public ClusterStatus getClusterStatus() throws IOException {
-    return getClusterStatus(Options.getDefaultOptions());
+    return getClusterStatus(EnumSet.allOf(Option.class));
   }
 
   @Override
-  public ClusterStatus getClusterStatus(Options options) throws IOException {
+  public ClusterStatus getClusterStatus(EnumSet<Option> options) throws IOException {
     return executeCallable(new MasterCallable<ClusterStatus>(getConnection(),
         this.rpcControllerFactory) {
       @Override
@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -44,7 +45,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
 import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.ClusterStatus.Options;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
@@ -2430,11 +2431,11 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
 
   @Override
   public CompletableFuture<ClusterStatus> getClusterStatus() {
-    return getClusterStatus(Options.getDefaultOptions());
+    return getClusterStatus(EnumSet.allOf(Option.class));
   }
 
   @Override
-  public CompletableFuture<ClusterStatus> getClusterStatus(Options options) {
+  public CompletableFuture<ClusterStatus>getClusterStatus(EnumSet<Option> options) {
     return this
         .<ClusterStatus> newMasterCaller()
         .action(
@@ -25,6 +25,7 @@ import java.lang.reflect.Method;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Locale;
@@ -48,6 +49,7 @@ import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.ClusterId;
 import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.ExtendedCellBuilder;
 import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
@@ -3001,42 +3003,71 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert proto ClusterStatus.Options to ClusterStatusProtos.Options
-   * @param opt
-   * @return proto ClusterStatus.Options
+   * Convert ClusterStatusProtos.Option to ClusterStatus.Option
+   * @param option a ClusterStatusProtos.Option
+   * @return converted ClusterStatus.Option
    */
-  public static ClusterStatus.Options toOptions (ClusterStatusProtos.Options opt) {
-    ClusterStatus.Options option = ClusterStatus.Options.getDefaultOptions();
-    if (!opt.getIncludeHbaseVersion()) option.excludeHBaseVersion();
-    if (!opt.getIncludeLiveServers()) option.excludeLiveServers();
-    if (!opt.getIncludeDeadServers()) option.excludeDeadServers();
-    if (!opt.getIncludeRegionsState()) option.excludeRegionState();
-    if (!opt.getIncludeClusterId()) option.excludeClusterId();
-    if (!opt.getIncludeMasterCoprocessors()) option.excludeMasterCoprocessors();
-    if (!opt.getIncludeMaster()) option.excludeMaster();
-    if (!opt.getIncludeBackupMasters()) option.excludeBackupMasters();
-    if (!opt.getIncludeBalancerOn()) option.excludeBalancerOn();
-    return option;
+  public static ClusterStatus.Option toOption(ClusterStatusProtos.Option option) {
+    switch (option) {
+      case HBASE_VERSION: return ClusterStatus.Option.HBASE_VERSION;
+      case LIVE_SERVERS: return ClusterStatus.Option.LIVE_SERVERS;
+      case DEAD_SERVERS: return ClusterStatus.Option.DEAD_SERVERS;
+      case REGIONS_IN_TRANSITION: return ClusterStatus.Option.REGIONS_IN_TRANSITION;
+      case CLUSTER_ID: return ClusterStatus.Option.CLUSTER_ID;
+      case MASTER_COPROCESSORS: return ClusterStatus.Option.MASTER_COPROCESSORS;
+      case MASTER: return ClusterStatus.Option.MASTER;
+      case BACKUP_MASTERS: return ClusterStatus.Option.BACKUP_MASTERS;
+      case BALANCER_ON: return ClusterStatus.Option.BALANCER_ON;
+      // should not reach here
+      default: throw new IllegalArgumentException("Invalid option: " + option);
+    }
   }
 
   /**
-   * Convert ClusterStatus.Options to proto ClusterStatusProtos.Options
-   * @param opt
-   * @return ClusterStatus.Options
+   * Convert ClusterStatus.Option to ClusterStatusProtos.Option
+   * @param option a ClusterStatus.Option
+   * @return converted ClusterStatusProtos.Option
    */
-  public static ClusterStatusProtos.Options toOptions(ClusterStatus.Options opt) {
-    ClusterStatusProtos.Options.Builder option =
-      ClusterStatusProtos.Options.newBuilder();
-    option.setIncludeHbaseVersion(opt.includeHBaseVersion())
-      .setIncludeLiveServers(opt.includeLiveServers())
-      .setIncludeDeadServers(opt.includeDeadServers())
-      .setIncludeRegionsState(opt.includeRegionState())
-      .setIncludeClusterId(opt.includeClusterId())
-      .setIncludeMasterCoprocessors(opt.includeMasterCoprocessors())
-      .setIncludeMaster(opt.includeMaster())
-      .setIncludeBackupMasters(opt.includeBackupMasters())
-      .setIncludeBalancerOn(opt.includeBalancerOn());
-    return option.build();
+  public static ClusterStatusProtos.Option toOption(ClusterStatus.Option option) {
+    switch (option) {
+      case HBASE_VERSION: return ClusterStatusProtos.Option.HBASE_VERSION;
+      case LIVE_SERVERS: return ClusterStatusProtos.Option.LIVE_SERVERS;
+      case DEAD_SERVERS: return ClusterStatusProtos.Option.DEAD_SERVERS;
+      case REGIONS_IN_TRANSITION: return ClusterStatusProtos.Option.REGIONS_IN_TRANSITION;
+      case CLUSTER_ID: return ClusterStatusProtos.Option.CLUSTER_ID;
+      case MASTER_COPROCESSORS: return ClusterStatusProtos.Option.MASTER_COPROCESSORS;
+      case MASTER: return ClusterStatusProtos.Option.MASTER;
+      case BACKUP_MASTERS: return ClusterStatusProtos.Option.BACKUP_MASTERS;
+      case BALANCER_ON: return ClusterStatusProtos.Option.BALANCER_ON;
+      // should not reach here
+      default: throw new IllegalArgumentException("Invalid option: " + option);
+    }
+  }
+
+  /**
+   * Convert a list of ClusterStatusProtos.Option to an enum set of ClusterStatus.Option
+   * @param options
+   * @return an enum set of ClusterStatus.Option
+   */
+  public static EnumSet<Option> toOptions(List<ClusterStatusProtos.Option> options) {
+    EnumSet<Option> result = EnumSet.noneOf(Option.class);
+    for (ClusterStatusProtos.Option opt : options) {
+      result.add(toOption(opt));
+    }
+    return result;
+  }
+
+  /**
+   * Convert an enum set of ClusterStatus.Option to a list of ClusterStatusProtos.Option
+   * @param options
+   * @return a list of ClusterStatusProtos.Option
+   */
+  public static List<ClusterStatusProtos.Option> toOptions(EnumSet<Option> options) {
+    List<ClusterStatusProtos.Option> result = new ArrayList<>(options.size());
+    for (ClusterStatus.Option opt : options) {
+      result.add(toOption(opt));
+    }
+    return result;
   }
 
   /**
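Note: the switches above spell out every constant on purpose. Because the client-side enum and the protobuf enum share constant names, a name-based lookup would be an equivalent shortcut; this is only an illustrative alternative (toOptionByName is hypothetical, not part of the patch), and the explicit switch has the advantage of failing loudly if the two enums ever drift apart:

  // Hypothetical shortcut relying on the identical constant names in both enums.
  public static ClusterStatus.Option toOptionByName(ClusterStatusProtos.Option option) {
    return ClusterStatus.Option.valueOf(option.name());
  }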
@@ -20,13 +20,14 @@ package org.apache.hadoop.hbase.shaded.protobuf;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Optional;
 import java.util.Set;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.hbase.CellScannable;
-import org.apache.hadoop.hbase.ClusterStatus.Options;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -1516,9 +1517,9 @@ public final class RequestConverter {
    *
    * @return A GetClusterStatusRequest
    */
-  public static GetClusterStatusRequest buildGetClusterStatusRequest(Options opt) {
+  public static GetClusterStatusRequest buildGetClusterStatusRequest(EnumSet<Option> options) {
     return GetClusterStatusRequest.newBuilder()
-        .setClusterOptions(ProtobufUtil.toOptions(opt))
+        .addAllOptions(ProtobufUtil.toOptions(options))
         .build();
   }
 
@@ -226,14 +226,14 @@ message ClusterStatus {
   optional bool balancer_on = 9;
 }
 
-message Options {
-  optional bool include_hbase_version = 1 [default = true];
-  optional bool include_live_servers = 2 [default = true];
-  optional bool include_dead_servers = 3 [default = true];
-  optional bool include_regions_state = 4 [default = true];
-  optional bool include_cluster_id = 5 [default = true];
-  optional bool include_master_coprocessors = 6 [default = true];
-  optional bool include_master = 7 [default = true];
-  optional bool include_backup_masters = 8 [default = true];
-  optional bool include_balancer_on = 9 [default = true];
+enum Option {
+  HBASE_VERSION = 0;
+  CLUSTER_ID = 1;
+  LIVE_SERVERS = 2;
+  DEAD_SERVERS = 3;
+  MASTER = 4;
+  BACKUP_MASTERS = 5;
+  MASTER_COPROCESSORS = 6;
+  REGIONS_IN_TRANSITION = 7;
+  BALANCER_ON = 8;
 }
@@ -485,7 +485,7 @@ message GetTableStateResponse {
 }
 
 message GetClusterStatusRequest {
-  required Options cluster_options = 1;
+  repeated Option options = 1;
 }
 
 message GetClusterStatusResponse {
@@ -226,14 +226,14 @@ message ClusterStatus {
   optional bool balancer_on = 9;
 }
 
-message Options {
-  optional bool include_hbase_version = 1 [default = true];
-  optional bool include_live_servers = 2 [default = true];
-  optional bool include_dead_servers = 3 [default = true];
-  optional bool include_regions_state = 4 [default = true];
-  optional bool include_cluster_id = 5 [default = true];
-  optional bool include_master_coprocessors = 6 [default = true];
-  optional bool include_master = 7 [default = true];
-  optional bool include_backup_masters = 8 [default = true];
-  optional bool include_balancer_on = 9 [default = true];
+enum Option {
+  HBASE_VERSION = 0;
+  CLUSTER_ID = 1;
+  LIVE_SERVERS = 2;
+  DEAD_SERVERS = 3;
+  MASTER = 4;
+  BACKUP_MASTERS = 5;
+  MASTER_COPROCESSORS = 6;
+  REGIONS_IN_TRANSITION = 7;
+  BALANCER_ON = 8;
 }
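Note: with GetClusterStatusRequest now carrying a repeated Option, the generated builder exposes addOptions/addAllOptions. A sketch of hand-building the same kind of request that RequestConverter.buildGetClusterStatusRequest assembles above (class names follow the generated protobuf code):

  // Request only live and dead server status over the wire.
  static GetClusterStatusRequest buildServersOnlyRequest() {
    return GetClusterStatusRequest.newBuilder()
        .addOptions(ClusterStatusProtos.Option.LIVE_SERVERS)
        .addOptions(ClusterStatusProtos.Option.DEAD_SERVERS)
        .build();
  }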
@@ -29,6 +29,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -56,7 +57,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.ClusterStatus.Options;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -2457,43 +2458,51 @@ public class HMaster extends HRegionServer implements MasterServices {
    * @return cluster status
    */
   public ClusterStatus getClusterStatus() throws InterruptedIOException {
-    return getClusterStatus(Options.getDefaultOptions());
+    return getClusterStatus(EnumSet.allOf(Option.class));
   }
 
   /**
    * @return cluster status
    */
-  public ClusterStatus getClusterStatus(Options options) throws InterruptedIOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Retrieving cluster status info. " + options);
-    }
+  public ClusterStatus getClusterStatus(EnumSet<Option> options) throws InterruptedIOException {
     ClusterStatus.Builder builder = ClusterStatus.newBuilder();
-    if (options.includeHBaseVersion()) {
-      builder.setHBaseVersion(VersionInfo.getVersion());
-    }
-    if (options.includeClusterId()) {
-      builder.setClusterId(getClusterId());
-    }
-    if (options.includeLiveServers() && serverManager != null) {
-      builder.setLiveServers(serverManager.getOnlineServers());
-    }
-    if (options.includeDeadServers() && serverManager != null) {
-      builder.setDeadServers(serverManager.getDeadServers().copyServerNames());
-    }
-    if (options.includeMaster()) {
-      builder.setMaster(getServerName());
-    }
-    if (options.includeBackupMasters()) {
-      builder.setBackupMasters(getBackupMasters());
-    }
-    if (options.includeRegionState() && assignmentManager != null) {
-      builder.setRegionState(assignmentManager.getRegionStates().getRegionsStateInTransition());
-    }
-    if (options.includeMasterCoprocessors() && cpHost != null) {
-      builder.setMasterCoprocessors(getMasterCoprocessors());
-    }
-    if (options.includeBalancerOn() && loadBalancerTracker != null) {
-      builder.setBalancerOn(loadBalancerTracker.isBalancerOn());
+    for (Option opt : options) {
+      switch (opt) {
+        case HBASE_VERSION: builder.setHBaseVersion(VersionInfo.getVersion()); break;
+        case CLUSTER_ID: builder.setClusterId(getClusterId()); break;
+        case MASTER: builder.setMaster(getServerName()); break;
+        case BACKUP_MASTERS: builder.setBackupMasters(getBackupMasters()); break;
+        case LIVE_SERVERS: {
+          if (serverManager != null) {
+            builder.setLiveServers(serverManager.getOnlineServers());
+          }
+          break;
+        }
+        case DEAD_SERVERS: {
+          if (serverManager != null) {
+            builder.setDeadServers(serverManager.getDeadServers().copyServerNames());
+          }
+          break;
+        }
+        case MASTER_COPROCESSORS: {
+          if (cpHost != null) {
+            builder.setMasterCoprocessors(getMasterCoprocessors());
+          }
+          break;
+        }
+        case REGIONS_IN_TRANSITION: {
+          if (assignmentManager != null) {
+            builder.setRegionState(assignmentManager.getRegionStates().getRegionsStateInTransition());
+          }
+          break;
+        }
+        case BALANCER_ON: {
+          if (loadBalancerTracker != null) {
+            builder.setBalancerOn(loadBalancerTracker.isBalancerOn());
+          }
+          break;
+        }
+      }
     }
     return builder.build();
   }
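Note: two properties of the new server-side loop are worth calling out: nothing is computed for options that were not requested, and every case that touches a master component null-checks it, so a partially started master can still answer. Condensed to two representative cases (names as in the hunk above, remaining cases elided):

    ClusterStatus.Builder builder = ClusterStatus.newBuilder();
    for (Option opt : options) {
      switch (opt) {
        case HBASE_VERSION:
          builder.setHBaseVersion(VersionInfo.getVersion());
          break;
        case LIVE_SERVERS:
          if (serverManager != null) {   // master may still be starting up
            builder.setLiveServers(serverManager.getOnlineServers());
          }
          break;
        default:
          break;   // remaining options are handled the same way
      }
    }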
@@ -870,7 +870,7 @@ public class MasterRpcServices extends RSRpcServices
     try {
       master.checkInitialized();
       response.setClusterStatus(ProtobufUtil.convert(
-        master.getClusterStatus(ProtobufUtil.toOptions(req.getClusterOptions()))));
+        master.getClusterStatus(ProtobufUtil.toOptions(req.getOptionsList()))));
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -17,18 +17,18 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import java.util.EnumSet;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.ClusterStatus.Options;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.master.HMaster;
-import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -73,7 +73,7 @@ public class TestClientClusterStatus {
   @Test
   public void testDefaults() throws Exception {
     ClusterStatus origin = ADMIN.getClusterStatus();
-    ClusterStatus defaults = ADMIN.getClusterStatus(Options.getDefaultOptions());
+    ClusterStatus defaults = ADMIN.getClusterStatus(EnumSet.allOf(Option.class));
     Assert.assertEquals(origin.getHBaseVersion(), defaults.getHBaseVersion());
     Assert.assertEquals(origin.getClusterId(), defaults.getClusterId());
     Assert.assertTrue(origin.getAverageLoad() == defaults.getAverageLoad());
@@ -84,18 +84,8 @@
   }
 
   @Test
-  public void testExclude() throws Exception {
-    ClusterStatus.Options options = Options.getDefaultOptions();
-    // Only retrieve master's coprocessors which are null in this test env.
-    options.excludeHBaseVersion()
-           .excludeBackupMasters()
-           .excludeBalancerOn()
-           .excludeClusterId()
-           .excludeLiveServers()
-           .excludeDeadServers()
-           .excludeMaster()
-           .excludeRegionState();
-    ClusterStatus status = ADMIN.getClusterStatus(options);
+  public void testNone() throws Exception {
+    ClusterStatus status = ADMIN.getClusterStatus(EnumSet.noneOf(Option.class));
     // Other cluster status info should be either null or empty.
     Assert.assertTrue(status.getMasterCoprocessors().length == 0);
     Assert.assertNull(status.getHBaseVersion());
@@ -106,6 +96,11 @@
     Assert.assertTrue(status.getDeadServerNames().isEmpty());
     Assert.assertNull(status.getMaster());
     Assert.assertTrue(status.getBackupMasters().isEmpty());
+    // No npe thrown is expected
+    Assert.assertNotNull(status.hashCode());
+    ClusterStatus nullEqualsCheck =
+        ADMIN.getClusterStatus(EnumSet.noneOf(Option.class));
+    Assert.assertNotNull(status.equals(nullEqualsCheck));
   }
 
   @Test
@@ -117,7 +112,7 @@
     CompletableFuture<ClusterStatus> originFuture =
         asyncAdmin.getClusterStatus();
     CompletableFuture<ClusterStatus> defaultsFuture =
-        asyncAdmin.getClusterStatus(Options.getDefaultOptions());
+        asyncAdmin.getClusterStatus(EnumSet.allOf(Option.class));
     ClusterStatus origin = originFuture.get();
     ClusterStatus defaults = defaultsFuture.get();
     Assert.assertEquals(origin.getHBaseVersion(), defaults.getHBaseVersion());
@@ -143,14 +138,7 @@
       }
     }
     // Retrieve live servers and dead servers info.
-    ClusterStatus.Options options = Options.getDefaultOptions();
-    options.excludeHBaseVersion()
-           .excludeBackupMasters()
-           .excludeBalancerOn()
-           .excludeClusterId()
-           .excludeMaster()
-           .excludeMasterCoprocessors()
-           .excludeRegionState();
+    EnumSet<Option> options = EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS);
     ClusterStatus status = ADMIN.getClusterStatus(options);
     Assert.assertNotNull(status);
     Assert.assertNotNull(status.getServers());
@@ -187,14 +175,7 @@
     Assert.assertEquals(1, numActive);
     Assert.assertEquals(MASTERS, masterThreads.size());
     // Retrieve master and backup masters infos only.
-    ClusterStatus.Options options = Options.getDefaultOptions();
-    options.excludeHBaseVersion()
-           .excludeBalancerOn()
-           .excludeClusterId()
-           .excludeLiveServers()
-           .excludeDeadServers()
-           .excludeMasterCoprocessors()
-           .excludeRegionState();
+    EnumSet<Option> options = EnumSet.of(Option.MASTER, Option.BACKUP_MASTERS);
     ClusterStatus status = ADMIN.getClusterStatus(options);
     Assert.assertTrue(status.getMaster().equals(activeName));
     Assert.assertEquals(MASTERS - 1, status.getBackupMastersSize());
@@ -202,12 +183,9 @@
 
   @Test
   public void testOtherStatusInfos() throws Exception {
-    ClusterStatus.Options options = Options.getDefaultOptions();
-    options.excludeMaster()
-           .excludeBackupMasters()
-           .excludeRegionState()
-           .excludeLiveServers()
-           .excludeBackupMasters();
+    EnumSet<Option> options =
+        EnumSet.of(Option.MASTER_COPROCESSORS, Option.HBASE_VERSION,
+                   Option.CLUSTER_ID, Option.BALANCER_ON);
     ClusterStatus status = ADMIN.getClusterStatus(options);
     Assert.assertTrue(status.getMasterCoprocessors().length == 0);
     Assert.assertNotNull(status.getHBaseVersion());