HBASE-12679 Add HBaseInterfaceAudience.TOOLS and move some of the Public classes to LimitedPrivate

Enis Soztutar 2014-12-14 17:46:30 -08:00
parent 65830b096b
commit 743d16138e
31 changed files with 117 additions and 60 deletions

View File

@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
* its session but hasn't figured it out yet.
*/
@SuppressWarnings("serial")
@InterfaceAudience.Public
@InterfaceAudience.Private
@InterfaceStability.Stable
public class YouAreDeadException extends IOException {
public YouAreDeadException(String message) {

View File

@ -29,7 +29,9 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
* Defines methods to create new HTableInterface.
*
* @since 0.21.0
* @deprecated in favor of {@link ConnectionFactory} and {@link Connection}.
*/
@Deprecated
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface HTableInterfaceFactory {

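For reference, the replacement pattern that the deprecation note points to looks like this (a minimal sketch; the configuration and table name are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

Configuration conf = HBaseConfiguration.create();
// A Connection is heavyweight and meant to be shared; Table is a lightweight,
// per-use handle obtained from it.
try (Connection connection = ConnectionFactory.createConnection(conf);
     Table table = connection.getTable(TableName.valueOf("my_table"))) {
  // read from / write to the table here
}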
View File

@ -146,7 +146,7 @@ public abstract class Query extends OperationWithAttributes {
return this.targetReplicaId;
}
/*
/**
* Set the isolation level for this query. If the
* isolation level is set to READ_UNCOMMITTED, then
* this query will return data from committed and
@ -161,7 +161,8 @@ public abstract class Query extends OperationWithAttributes {
setAttribute(ISOLATION_LEVEL, level.toBytes());
return this;
}
/*
/**
* @return The isolation level of this query.
* If no isolation level was set for this query object,
* then it returns READ_COMMITTED.

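The corrected markers (/** instead of /*) restore the Javadoc on Query's isolation-level accessors. Since Scan extends Query, a typical use is (a sketch against the standard client API):

import org.apache.hadoop.hbase.client.IsolationLevel;
import org.apache.hadoop.hbase.client.Scan;

Scan scan = new Scan();
// READ_UNCOMMITTED returns data from committed and uncommitted transactions;
// if no level is set, getIsolationLevel() reports the default, READ_COMMITTED.
scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);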
View File

@ -29,7 +29,6 @@ import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.util.Bytes;
@ -38,8 +37,7 @@ import org.apache.hadoop.hbase.util.Bytes;
/**
* A reversed ScannerCallable which supports backward scanning.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
@InterfaceAudience.Private
public class ReversedScannerCallable extends ScannerCallable {
/**
* The start row for locating regions. In reversed scanner, may locate the

View File

@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
@InterfaceAudience.Private
public class RegionState {
@InterfaceAudience.Public
@InterfaceAudience.Private
@InterfaceStability.Evolving
public enum State {
OFFLINE, // region is in an offline state

View File

@ -35,6 +35,7 @@ import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.util.Strings;
@ -52,10 +53,10 @@ import org.apache.zookeeper.server.quorum.QuorumPeerMain;
* control over the process. This class uses {@link ZKConfig} to parse the
* zoo.cfg and inject variables from HBase's site.xml configuration.
*/
@InterfaceAudience.Public
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
@InterfaceStability.Evolving
public class HQuorumPeer {
/**
* Parse ZooKeeper configuration from HBase XML config and run a QuorumPeer.
* @param args String[] of command line arguments. Not used.

View File

@ -39,4 +39,10 @@ public final class HBaseInterfaceAudience {
* Denotes class names that appear in user-facing configuration files.
*/
public static final String CONFIG = "Configuration";
/**
* Denotes classes used as tools (run from the command line). Compatibility is usually
* required for the class name and command-line arguments.
*/
public static final String TOOLS = "Tools";
}

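This constant is the audience referenced by the LimitedPrivate annotations throughout this commit. A minimal sketch of the intended use (FooTool is hypothetical, not part of this commit):

import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceAudience;

// Only the class name and its command-line arguments are kept compatible;
// the Java API of the class itself may change between releases.
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
public class FooTool {
  public static void main(String[] args) {
    // tool entry point
  }
}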
View File

@ -34,6 +34,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.http.InfoServer;
import org.apache.hadoop.hbase.rest.filter.AuthFilter;
import org.apache.hadoop.hbase.security.UserProvider;
@ -63,7 +64,7 @@ import com.sun.jersey.spi.container.servlet.ServletContainer;
* <li>-ro --readonly : server mode</li>
* </ul>
*/
@InterfaceAudience.Private
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
public class RESTServer implements Constants {
private static void printUsageAndExit(Options options, int exitCode) {

View File

@ -48,6 +48,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseConfiguration;
@ -77,7 +78,7 @@ import com.yammer.metrics.reporting.ConsoleReporter;
/**
* Implements pretty-printing functionality for {@link HFile}s.
*/
@InterfaceAudience.Public
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
@InterfaceStability.Evolving
public class HFilePrettyPrinter extends Configured implements Tool {
@ -206,6 +207,7 @@ public class HFilePrettyPrinter extends Configured implements Tool {
* Runs the command-line pretty-printer, and returns the desired command
* exit code (zero for success, non-zero for failure).
*/
@Override
public int run(String[] args) {
if (getConf() == null) {
throw new RuntimeException("A Configuration instance must be provided.");
@ -261,7 +263,7 @@ public class HFilePrettyPrinter extends Configured implements Tool {
boolean shouldScanKeysValues = false;
if (this.isSeekToRow) {
// seek to the first kv on this row
shouldScanKeysValues =
shouldScanKeysValues =
(scanner.seekTo(KeyValueUtil.createFirstOnRow(this.row).getKey()) != -1);
} else {
shouldScanKeysValues = scanner.seekTo();

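HFilePrettyPrinter extends Configured and implements Tool, which is why run(String[]) now carries @Override. Programmatic invocation follows the usual ToolRunner pattern (a sketch; the HFile path is a placeholder):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter;
import org.apache.hadoop.util.ToolRunner;

// -p prints key/values; -f names the HFile to inspect.
int exit = ToolRunner.run(HBaseConfiguration.create(),
    new HFilePrettyPrinter(), new String[] { "-p", "-f", "/path/to/hfile" });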
View File

@ -18,23 +18,25 @@
*/
package org.apache.hadoop.hbase.mapred;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.util.ProgramDriver;
import com.google.common.annotations.VisibleForTesting;
/**
* Driver for hbase mapreduce jobs. Select which job to run by passing its name
* to this main.
*/
@InterfaceAudience.Public
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
@InterfaceStability.Stable
public class Driver {
private static ProgramDriver pgd = new ProgramDriver();
@VisibleForTesting
static void setProgramDriver(ProgramDriver pgd0) {
static void setProgramDriver(ProgramDriver pgd0) {
pgd = pgd0;
}
@ -45,6 +47,6 @@ public class Driver {
public static void main(String[] args) throws Throwable {
pgd.addClass(RowCounter.NAME, RowCounter.class, "Count rows in HBase table");
ProgramDriver.class.getMethod("driver", new Class[] { String[].class })
.invoke(pgd, new Object[] { args });
.invoke(pgd, new Object[] { args });
}
}

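The @VisibleForTesting setter lets tests inject a stub ProgramDriver; in normal use the driver dispatches on the job name given as the first argument (a sketch; the table name is a placeholder):

// Runs the job registered as "rowcounter" against a placeholder table.
Driver.main(new String[] { "rowcounter", "my_table" });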
View File

@ -18,6 +18,7 @@
*/
package org.apache.hadoop.hbase.mapreduce;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication;
@ -27,7 +28,7 @@ import org.apache.hadoop.util.ProgramDriver;
* Driver for hbase mapreduce jobs. Select which job to run by passing
* its name to this main.
*/
@InterfaceAudience.Public
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
@InterfaceStability.Stable
public class Driver {
/**

View File

@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.CoordinatedStateException;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
@ -159,7 +160,7 @@ import com.google.protobuf.Service;
*
* @see org.apache.zookeeper.Watcher
*/
@InterfaceAudience.Private
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
@SuppressWarnings("deprecation")
public class HMaster extends HRegionServer implements MasterServices, Server {
private static final Log LOG = LogFactory.getLog(HMaster.class.getName());
@ -424,6 +425,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
/**
* For compatibility, if login fails with the region server credentials, try the master ones
*/
@Override
protected void login(UserProvider user, String host) throws IOException {
try {
super.login(user, host);
@ -438,6 +440,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
* wait till a backup master becomes active.
* Otherwise, loop till the server is stopped or aborted.
*/
@Override
protected void waitForMasterActive(){
boolean tablesOnMaster = BaseLoadBalancer.tablesOnMaster(conf);
while (!(tablesOnMaster && isActiveMaster)
@ -455,22 +458,27 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
return getMasterRpcServices().switchBalancer(b, BalanceSwitchMode.ASYNC);
}
@Override
protected String getProcessName() {
return MASTER;
}
@Override
protected boolean canCreateBaseZNode() {
return true;
}
@Override
protected boolean canUpdateTableDescriptor() {
return true;
}
@Override
protected RSRpcServices createRpcServices() throws IOException {
return new MasterRpcServices(this);
}
@Override
protected void configureInfoServer() {
infoServer.addServlet("master-status", "/master-status", MasterStatusServlet.class);
infoServer.setAttribute(MASTER, this);
@ -479,6 +487,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
}
@Override
protected Class<? extends HttpServlet> getDumpServlet() {
return MasterDumpServlet.class;
}
@ -487,6 +496,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
* Emit the HMaster metrics, such as region-in-transition metrics.
* Wrapped in a try block just to be sure metrics don't abort HMaster.
*/
@Override
protected void doMetrics() {
try {
if (assignmentManager != null) {
@ -963,6 +973,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
}
@Override
protected void stopServiceThreads() {
if (masterJettyServer != null) {
LOG.info("Stopping master jetty server");
@ -1370,6 +1381,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
masterInfoPort, this);
// Start a thread to try to become the active master, so we won't block here
Threads.setDaemonThreadRunning(new Thread(new Runnable() {
@Override
public void run() {
int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT,
HConstants.DEFAULT_ZK_SESSION_TIMEOUT);

View File

@ -27,7 +27,6 @@ import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
@ -42,7 +41,6 @@ import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.LineReader;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
@ -50,8 +48,8 @@ import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
@ -73,7 +71,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
* <li>family folder (the store files will be compacted)
* </ul>
*/
@InterfaceAudience.Public
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
public class CompactionTool extends Configured implements Tool {
private static final Log LOG = LogFactory.getLog(CompactionTool.class);

View File

@ -63,6 +63,7 @@ import org.apache.hadoop.hbase.ClockOutOfSyncException;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.CoordinatedStateManagerFactory;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HealthCheckChore;
@ -178,7 +179,7 @@ import com.google.protobuf.ServiceException;
* HRegionServer makes a set of HRegions available to clients. It checks in with
* the HMaster. There are many HRegionServers in a single HBase deployment.
*/
@InterfaceAudience.Private
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
@SuppressWarnings("deprecation")
public class HRegionServer extends HasThread implements
RegionServerServices, LastSequenceId {

View File

@ -28,13 +28,13 @@ import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerResult;
* Makes the decision regarding proper sizing of the heap memory. Decides what percentage of heap
* memory should be allocated for global memstore and BlockCache.
*/
@InterfaceAudience.Public
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface HeapMemoryTuner extends Configurable {
/**
* Perform the heap memory tuning operation.
*
*
* @param context
* @return <code>TunerResult</code> including the heap percentage for memstore and block cache
*/

View File

@ -19,26 +19,26 @@ package org.apache.hadoop.hbase.regionserver.wal;
import java.io.IOException;
import java.io.PrintStream;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.wal.WALPrettyPrinter;
/**
* HLogPrettyPrinter prints the contents of a given HLog with a variety of
* options affecting formatting and extent of content.
*
*
* It targets two use cases: pretty printing for ease of debugging directly by
* humans, and JSON output for consumption by monitoring and/or maintenance
* scripts.
*
*
* It can filter by row, region, or sequence id.
*
*
* It can also toggle output of values.
*
*
* @deprecated use the "hbase wal" command
*/
@InterfaceAudience.Public
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
@InterfaceStability.Evolving
@Deprecated
public class HLogPrettyPrinter extends WALPrettyPrinter {
@ -52,7 +52,7 @@ public class HLogPrettyPrinter extends WALPrettyPrinter {
/**
* Fully specified constructor.
*
*
* @param outputValues
* when true, enables output of values along with other log
* information
@ -72,7 +72,7 @@ public class HLogPrettyPrinter extends WALPrettyPrinter {
* keeps a single list running for multiple files. if enabled, the
* endPersistentOutput() method must be used!
* @param out
* Specifies an alternative to stdout for the destination of this
* Specifies an alternative to stdout for the destination of this
* PrettyPrinter's output.
*/
public HLogPrettyPrinter(boolean outputValues, boolean outputJSON,

View File

@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
@ -45,7 +46,7 @@ import org.apache.hadoop.io.compress.Compressor;
* Compression validation test. Checks that compression is working. Be sure to run
* on every node in your cluster.
*/
@InterfaceAudience.Public
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
@InterfaceStability.Evolving
public class CompressionTest {
static final Log LOG = LogFactory.getLog(CompressionTest.class);

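CompressionTest is run from the command line, and its main can be driven the same way from Java (a sketch; the path is a placeholder and the codec must actually be available on the node):

// Writes and re-reads a small test file at the given path using the gz codec.
CompressionTest.main(new String[] { "file:///tmp/compression-test", "gz" });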
View File

@ -66,6 +66,7 @@ import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
@ -177,7 +178,7 @@ import com.google.protobuf.ServiceException;
* can be used to limit the kinds of repairs hbck will do. See the code in
* {@link #printUsageAndExit()} for more details.
*/
@InterfaceAudience.Public
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
@InterfaceStability.Evolving
public class HBaseFsck extends Configured {
public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute

View File

@ -19,6 +19,8 @@ package org.apache.hadoop.hbase.util;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
@ -29,6 +31,7 @@ import org.apache.log4j.Logger;
* Generate a classpath string containing any jars required by mapreduce jobs. Specify
* additional values by providing a comma-separated list of paths via -Dtmpjars.
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
public class MapreduceDependencyClasspathTool implements Tool {
private Configuration conf;

View File

@ -24,11 +24,11 @@ import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseConfiguration;
@ -53,8 +53,7 @@ import com.google.common.base.Preconditions;
* Utility that can merge any two regions in the same table: adjacent,
* overlapping or disjoint.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
public class Merge extends Configured implements Tool {
static final Log LOG = LogFactory.getLog(Merge.class);
private Path rootdir;
@ -77,6 +76,7 @@ public class Merge extends Configured implements Tool {
setConf(conf);
}
@Override
public int run(String[] args) throws Exception {
if (parseArgs(args) != 0) {
return -1;

View File

@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HBaseFsck;
import org.apache.hadoop.io.MultipleIOException;
@ -36,13 +37,13 @@ import org.apache.hadoop.io.MultipleIOException;
are any problems detected, it will fail, suggesting actions for the user to take
* to "fix" problems. If it succeeds, it will backup the previous hbase:meta and
* -ROOT- dirs and write new tables in place.
*
*
This is an advanced feature, so it is only exposed for use if explicitly
* mentioned.
*
*
* hbase org.apache.hadoop.hbase.util.hbck.OfflineMetaRepair ...
*/
@InterfaceAudience.Public
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
@InterfaceStability.Evolving
public class OfflineMetaRepair {
private static final Log LOG = LogFactory.getLog(OfflineMetaRepair.class.getName());
@ -62,7 +63,7 @@ public class OfflineMetaRepair {
/**
* Main program
*
*
* @param args
* @throws Exception
*/

View File

@ -34,12 +34,14 @@ import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
@ -51,17 +53,18 @@ import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
/**
* WALPrettyPrinter prints the contents of a given WAL with a variety of
* options affecting formatting and extent of content.
*
*
* It targets two use cases: pretty printing for ease of debugging directly by
* humans, and JSON output for consumption by monitoring and/or maintenance
* scripts.
*
*
* It can filter by row, region, or sequence id.
*
*
* It can also toggle output of values.
*
*
*/
@InterfaceAudience.Private
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
@InterfaceStability.Evolving
public class WALPrettyPrinter {
private boolean outputValues;
private boolean outputJSON;
@ -93,7 +96,7 @@ public class WALPrettyPrinter {
/**
* Fully specified constructor.
*
*
* @param outputValues
* when true, enables output of values along with other log
* information
@ -113,7 +116,7 @@ public class WALPrettyPrinter {
* keeps a single list running for multiple files. if enabled, the
* endPersistentOutput() method must be used!
* @param out
* Specifies an alternative to stdout for the destination of this
* Specifies an alternative to stdout for the destination of this
* PrettyPrinter's output.
*/
public WALPrettyPrinter(boolean outputValues, boolean outputJSON,
@ -162,7 +165,7 @@ public class WALPrettyPrinter {
/**
* sets the sequence id by which output will be filtered
*
*
* @param sequence
* when nonnegative, serves as a filter; only log entries with this
* sequence id will be printed
@ -173,7 +176,7 @@ public class WALPrettyPrinter {
/**
* sets the region by which output will be filtered
*
*
* @param region
* when not null, serves as a filter; only log entries from this
* region will be printed
@ -184,7 +187,7 @@ public class WALPrettyPrinter {
/**
* sets the row by which output will be filtered
*
*
* @param row
* when not null, serves as a filter; only log entries from this row
* will be printed
@ -221,7 +224,7 @@ public class WALPrettyPrinter {
/**
* reads a log file and outputs its contents, one transaction at a time, as
* specified by the currently configured options
*
*
* @param conf
* the HBase configuration relevant to this log file
* @param p
@ -339,7 +342,7 @@ public class WALPrettyPrinter {
/**
* Pass one or more log file names and formatting options and it will dump out
* a text version of the contents on <code>stdout</code>.
*
*
* @param args
* Command line arguments
* @throws IOException

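Besides the command line, the printer can be driven programmatically through the setters documented above (a sketch; the WAL path and row are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.wal.WALPrettyPrinter;

Configuration conf = HBaseConfiguration.create();
WALPrettyPrinter printer = new WALPrettyPrinter();
printer.setRowFilter("my_row");  // only print entries touching this row
printer.processFile(conf, new Path("/path/to/wal"));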
View File

@ -23,16 +23,15 @@ import java.util.Properties;
import java.util.Map.Entry;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
/**
* Tool for reading ZooKeeper servers from HBase XML configuration and producing
* a line-by-line list for use by bash scripts.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
public class ZKServerTool {
/**
* Run the tool.

View File

@ -27,6 +27,8 @@ import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeperMain;
@ -34,6 +36,7 @@ import org.apache.zookeeper.ZooKeeperMain;
* Tool for running ZookeeperMain from HBase by reading a ZooKeeper server
* from HBase XML configuration.
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
public class ZooKeeperMainServer {
private static final String SERVER_ARG = "-server";

View File

@ -28,6 +28,7 @@ import org.apache.commons.math.random.RandomDataImpl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
@ -39,6 +40,7 @@ import org.apache.hadoop.hbase.util.Bytes;
/**
* This class runs performance benchmarks for {@link HFile}.
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
public class HFilePerformanceEvaluation {
private static final int ROW_LENGTH = 10;
private static final int ROW_COUNT = 1000000;
@ -91,6 +93,7 @@ public class HFilePerformanceEvaluation {
runBenchmark(new SequentialWriteBenchmark(conf, fs, mf, ROW_COUNT),
ROW_COUNT);
PerformanceEvaluationCommons.concurrentReads(new Runnable() {
@Override
public void run() {
try {
runBenchmark(new UniformRandomSmallScan(conf, fs, mf, ROW_COUNT),
@ -101,6 +104,7 @@ public class HFilePerformanceEvaluation {
}
});
PerformanceEvaluationCommons.concurrentReads(new Runnable() {
@Override
public void run() {
try {
runBenchmark(new UniformRandomReadBenchmark(conf, fs, mf, ROW_COUNT),
@ -111,6 +115,7 @@ public class HFilePerformanceEvaluation {
}
});
PerformanceEvaluationCommons.concurrentReads(new Runnable() {
@Override
public void run() {
try {
runBenchmark(new GaussianRandomReadBenchmark(conf, fs, mf, ROW_COUNT),
@ -121,6 +126,7 @@ public class HFilePerformanceEvaluation {
}
});
PerformanceEvaluationCommons.concurrentReads(new Runnable() {
@Override
public void run() {
try {
runBenchmark(new SequentialReadBenchmark(conf, fs, mf, ROW_COUNT),

View File

@ -37,7 +37,9 @@ import org.apache.hadoop.io.WritableComparable;
* <p>
* This class runs performance benchmarks for {@link MapFile}.
* </p>
* @deprecated HBase does not use MapFiles any more.
*/
@Deprecated
public class MapFilePerformanceEvaluation {
protected final Configuration conf;
private static final int ROW_LENGTH = 10;
@ -70,6 +72,7 @@ public class MapFilePerformanceEvaluation {
ROW_COUNT);
PerformanceEvaluationCommons.concurrentReads(new Runnable() {
@Override
public void run() {
try {
runBenchmark(new UniformRandomSmallScan(conf, fs, mf, ROW_COUNT),
@ -80,6 +83,7 @@ public class MapFilePerformanceEvaluation {
}
});
PerformanceEvaluationCommons.concurrentReads(new Runnable() {
@Override
public void run() {
try {
runBenchmark(new UniformRandomReadBenchmark(conf, fs, mf, ROW_COUNT),
@ -90,6 +94,7 @@ public class MapFilePerformanceEvaluation {
}
});
PerformanceEvaluationCommons.concurrentReads(new Runnable() {
@Override
public void run() {
try {
runBenchmark(new GaussianRandomReadBenchmark(conf, fs, mf, ROW_COUNT),
@ -100,6 +105,7 @@ public class MapFilePerformanceEvaluation {
}
});
PerformanceEvaluationCommons.concurrentReads(new Runnable() {
@Override
public void run() {
try {
runBenchmark(new SequentialReadBenchmark(conf, fs, mf, ROW_COUNT),

View File

@ -47,6 +47,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
@ -113,6 +114,7 @@ import com.yammer.metrics.stats.UniformSample;
* specifying {@code --nomapred}. Each client does about 1GB of data, unless
* specified otherwise.
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
public class PerformanceEvaluation extends Configured implements Tool {
protected static final Log LOG = LogFactory.getLog(PerformanceEvaluation.class.getName());
private static final ObjectMapper MAPPER = new ObjectMapper();
@ -657,7 +659,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
public void setCycles(final int cycles) {
this.cycles = cycles;
}
public boolean isValueZipf() {
return valueZipf;
}

View File

@ -26,6 +26,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
@ -53,6 +54,7 @@ import com.google.common.base.Stopwatch;
* A simple performance evaluation tool for single client and MR scans
* and snapshot scans.
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
public class ScanPerformanceEvaluation extends AbstractHBaseTool {
private static final String HBASE_COUNTER_GROUP_NAME = "HBase Counters";

View File

@ -35,11 +35,13 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.HBaseAdmin;
@ -63,6 +65,7 @@ import org.apache.hadoop.util.ToolRunner;
* {@link PerformanceEvaluation}, this tool validates the data written,
* and supports simultaneously writing and reading the same set of keys.
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
public class LoadTestTool extends AbstractHBaseTool {
private static final Log LOG = LogFactory.getLog(LoadTestTool.class);

View File

@ -31,6 +31,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.http.InfoServer;
import org.apache.hadoop.hbase.thrift.ThriftServerRunner.ImplType;
import org.apache.hadoop.hbase.util.VersionInfo;
@ -41,7 +42,7 @@ import org.apache.hadoop.util.Shell.ExitCodeException;
* Hbase API specified in the Hbase.thrift IDL file. The server runs in an
* independent process.
*/
@InterfaceAudience.Private
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
public class ThriftServer {
private static final Log LOG = LogFactory.getLog(ThriftServer.class);

View File

@ -50,6 +50,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.filter.ParseFilter;
import org.apache.hadoop.hbase.http.InfoServer;
import org.apache.hadoop.hbase.security.SecurityUtil;
@ -88,7 +89,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
* ThriftServer - this class starts up a Thrift server which implements the HBase API specified in the
* HbaseClient.thrift IDL file.
*/
@InterfaceAudience.Private
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
@SuppressWarnings({ "rawtypes", "unchecked" })
public class ThriftServer {
private static final Log log = LogFactory.getLog(ThriftServer.class);
@ -106,7 +107,7 @@ public class ThriftServer {
public static final int DEFAULT_LISTEN_PORT = 9090;
public ThriftServer() {
}