diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java index 37d5ccc450d..6ef54758624 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; * its session but didn't figure it yet. */ @SuppressWarnings("serial") -@InterfaceAudience.Public +@InterfaceAudience.Private @InterfaceStability.Stable public class YouAreDeadException extends IOException { public YouAreDeadException(String message) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterfaceFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterfaceFactory.java index b02caa8ca97..b6349c24ca6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterfaceFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterfaceFactory.java @@ -29,7 +29,9 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; * Defines methods to create new HTableInterface. * * @since 0.21.0 + * @deprecated in favor of {@link ConnectionFactory} and {@link Connection}. */ +@Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public interface HTableInterfaceFactory { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java index 793c3a1f492..9245f81ab10 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java @@ -146,7 +146,7 @@ public abstract class Query extends OperationWithAttributes { return this.targetReplicaId; } - /* + /** * Set the isolation level for this query. If the * isolation level is set to READ_UNCOMMITTED, then * this query will return data from committed and @@ -161,7 +161,8 @@ public abstract class Query extends OperationWithAttributes { setAttribute(ISOLATION_LEVEL, level.toBytes()); return this; } - /* + + /** * @return The isolation level of this query. * If no isolation level was set for this query object, * then it returns READ_COMMITTED. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java index 346342eaae0..e7c1acb5ee3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java @@ -29,7 +29,6 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.util.Bytes; @@ -38,8 +37,7 @@ import org.apache.hadoop.hbase.util.Bytes; /** * A reversed ScannerCallable which supports backward scanning. */ -@InterfaceAudience.Public -@InterfaceStability.Evolving +@InterfaceAudience.Private public class ReversedScannerCallable extends ScannerCallable { /** * The start row for locating regions. 
In reversed scanner, may locate the diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java index 34c43ce05f6..fd1c43215fc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; @InterfaceAudience.Private public class RegionState { - @InterfaceAudience.Public + @InterfaceAudience.Private @InterfaceStability.Evolving public enum State { OFFLINE, // region is in an offline state diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java index 8ec3b6a67af..1e04948eb12 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java @@ -35,6 +35,7 @@ import java.util.Properties; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.util.Strings; @@ -52,10 +53,10 @@ import org.apache.zookeeper.server.quorum.QuorumPeerMain; * control over the process. This class uses {@link ZKConfig} to parse the * zoo.cfg and inject variables from HBase's site.xml configuration in. */ -@InterfaceAudience.Public +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Evolving public class HQuorumPeer { - + /** * Parse ZooKeeper configuration from HBase XML config and run a QuorumPeer. * @param args String[] of command line arguments. Not used. diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseInterfaceAudience.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseInterfaceAudience.java index 840bbdc14da..2e58913345f 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseInterfaceAudience.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseInterfaceAudience.java @@ -39,4 +39,10 @@ public final class HBaseInterfaceAudience { * Denotes class names that appear in user facing configuration files. */ public static final String CONFIG = "Configuration"; + + /** + * Denotes classes used as tools (Used from cmd line). Usually, the compatibility is required + * for class name, and arguments. 
+ */ + public static final String TOOLS = "Tools"; } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java index 9f59b062c61..bb52fdb914b 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java @@ -34,6 +34,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.http.InfoServer; import org.apache.hadoop.hbase.rest.filter.AuthFilter; import org.apache.hadoop.hbase.security.UserProvider; @@ -63,7 +64,7 @@ import com.sun.jersey.spi.container.servlet.ServletContainer; *
<li>-ro --readonly : server mode</li>
  • * */ -@InterfaceAudience.Private +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class RESTServer implements Constants { private static void printUsageAndExit(Options options, int exitCode) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java index 8c018c6b1b6..7b92df91274 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java @@ -48,6 +48,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -77,7 +78,7 @@ import com.yammer.metrics.reporting.ConsoleReporter; /** * Implements pretty-printing functionality for {@link HFile}s. */ -@InterfaceAudience.Public +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Evolving public class HFilePrettyPrinter extends Configured implements Tool { @@ -206,6 +207,7 @@ public class HFilePrettyPrinter extends Configured implements Tool { * Runs the command-line pretty-printer, and returns the desired command * exit code (zero for success, non-zero for failure). */ + @Override public int run(String[] args) { if (getConf() == null) { throw new RuntimeException("A Configuration instance must be provided."); @@ -261,7 +263,7 @@ public class HFilePrettyPrinter extends Configured implements Tool { boolean shouldScanKeysValues = false; if (this.isSeekToRow) { // seek to the first kv on this row - shouldScanKeysValues = + shouldScanKeysValues = (scanner.seekTo(KeyValueUtil.createFirstOnRow(this.row).getKey()) != -1); } else { shouldScanKeysValues = scanner.seekTo(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java index 6ce4492a53c..c23fc84e5c3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java @@ -18,23 +18,25 @@ */ package org.apache.hadoop.hbase.mapred; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.util.ProgramDriver; + import com.google.common.annotations.VisibleForTesting; /** * Driver for hbase mapreduce jobs. Select which to run by passing name of job * to this main. 
*/ -@InterfaceAudience.Public +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Stable public class Driver { private static ProgramDriver pgd = new ProgramDriver(); @VisibleForTesting - static void setProgramDriver(ProgramDriver pgd0) { + static void setProgramDriver(ProgramDriver pgd0) { pgd = pgd0; } @@ -45,6 +47,6 @@ public class Driver { public static void main(String[] args) throws Throwable { pgd.addClass(RowCounter.NAME, RowCounter.class, "Count rows in HBase table"); ProgramDriver.class.getMethod("driver", new Class[] { String[].class }) - .invoke(pgd, new Object[] { args }); + .invoke(pgd, new Object[] { args }); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java index 01db92e6c89..4371894a9ee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.hbase.mapreduce; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication; @@ -27,7 +28,7 @@ import org.apache.hadoop.util.ProgramDriver; * Driver for hbase mapreduce jobs. Select which to run by passing * name of job to this main. */ -@InterfaceAudience.Public +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Stable public class Driver { /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 06b0e3ce4c7..8d2506d04e7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.CoordinatedStateException; import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -159,7 +160,7 @@ import com.google.protobuf.Service; * * @see org.apache.zookeeper.Watcher */ -@InterfaceAudience.Private +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @SuppressWarnings("deprecation") public class HMaster extends HRegionServer implements MasterServices, Server { private static final Log LOG = LogFactory.getLog(HMaster.class.getName()); @@ -424,6 +425,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { /** * For compatibility, if failed with regionserver credentials, try the master one */ + @Override protected void login(UserProvider user, String host) throws IOException { try { super.login(user, host); @@ -438,6 +440,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { * wait till a backup master becomes active. * Otherwise, loop till the server is stopped or aborted. 
*/ + @Override protected void waitForMasterActive(){ boolean tablesOnMaster = BaseLoadBalancer.tablesOnMaster(conf); while (!(tablesOnMaster && isActiveMaster) @@ -455,22 +458,27 @@ public class HMaster extends HRegionServer implements MasterServices, Server { return getMasterRpcServices().switchBalancer(b, BalanceSwitchMode.ASYNC); } + @Override protected String getProcessName() { return MASTER; } + @Override protected boolean canCreateBaseZNode() { return true; } + @Override protected boolean canUpdateTableDescriptor() { return true; } + @Override protected RSRpcServices createRpcServices() throws IOException { return new MasterRpcServices(this); } + @Override protected void configureInfoServer() { infoServer.addServlet("master-status", "/master-status", MasterStatusServlet.class); infoServer.setAttribute(MASTER, this); @@ -479,6 +487,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { } } + @Override protected Class getDumpServlet() { return MasterDumpServlet.class; } @@ -487,6 +496,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { * Emit the HMaster metrics, such as region in transition metrics. * Surrounding in a try block just to be sure metrics doesn't abort HMaster. */ + @Override protected void doMetrics() { try { if (assignmentManager != null) { @@ -963,6 +973,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { } } + @Override protected void stopServiceThreads() { if (masterJettyServer != null) { LOG.info("Stopping master jetty server"); @@ -1370,6 +1381,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { masterInfoPort, this); // Start a thread to try to become the active master, so we won't block here Threads.setDaemonThreadRunning(new Thread(new Runnable() { + @Override public void run() { int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java index 61d890a2486..96f4a3165ee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java @@ -27,7 +27,6 @@ import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; @@ -42,7 +41,6 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.util.LineReader; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; - import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.JobContext; @@ -50,8 +48,8 @@ import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.lib.input.FileSplit; import org.apache.hadoop.mapreduce.lib.input.TextInputFormat; import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; - import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HRegionInfo; @@ -73,7 +71,7 @@ import org.apache.hadoop.hbase.util.FSUtils; *
  • family folder (the store files will be compacted) * */ -@InterfaceAudience.Public +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class CompactionTool extends Configured implements Tool { private static final Log LOG = LogFactory.getLog(CompactionTool.class); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index ae749b15ff3..9bd7abc4d40 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -63,6 +63,7 @@ import org.apache.hadoop.hbase.ClockOutOfSyncException; import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.CoordinatedStateManagerFactory; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HealthCheckChore; @@ -178,7 +179,7 @@ import com.google.protobuf.ServiceException; * HRegionServer makes a set of HRegions available to clients. It checks in with * the HMaster. There are many HRegionServers in a single HBase deployment. */ -@InterfaceAudience.Private +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @SuppressWarnings("deprecation") public class HRegionServer extends HasThread implements RegionServerServices, LastSequenceId { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryTuner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryTuner.java index 1d0fb73f1bc..78a1ef6dff5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryTuner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryTuner.java @@ -28,13 +28,13 @@ import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerResult; * Makes the decision regarding proper sizing of the heap memory. Decides what percentage of heap * memory should be allocated for global memstore and BlockCache. */ -@InterfaceAudience.Public +@InterfaceAudience.Private @InterfaceStability.Evolving public interface HeapMemoryTuner extends Configurable { /** * Perform the heap memory tuning operation. - * + * * @param context * @return TunerResult including the heap percentage for memstore and block cache */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogPrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogPrettyPrinter.java index 8c645b00eb9..cf3b5c4b1d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogPrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogPrettyPrinter.java @@ -19,26 +19,26 @@ package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; import java.io.PrintStream; - import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.wal.WALPrettyPrinter; /** * HLogPrettyPrinter prints the contents of a given HLog with a variety of * options affecting formatting and extent of content. 
- * + * * It targets two usage cases: pretty printing for ease of debugging directly by * humans, and JSON output for consumption by monitoring and/or maintenance * scripts. - * + * * It can filter by row, region, or sequence id. - * + * * It can also toggle output of values. - * + * * @deprecated use the "hbase wal" command */ -@InterfaceAudience.Public +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Evolving @Deprecated public class HLogPrettyPrinter extends WALPrettyPrinter { @@ -52,7 +52,7 @@ public class HLogPrettyPrinter extends WALPrettyPrinter { /** * Fully specified constructor. - * + * * @param outputValues * when true, enables output of values along with other log * information @@ -72,7 +72,7 @@ public class HLogPrettyPrinter extends WALPrettyPrinter { * keeps a single list running for multiple files. if enabled, the * endPersistentOutput() method must be used! * @param out - * Specifies an alternative to stdout for the destination of this + * Specifies an alternative to stdout for the destination of this * PrettyPrinter's output. */ public HLogPrettyPrinter(boolean outputValues, boolean outputJSON, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java index 80fd671a0e8..fd65754e03e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter; import org.apache.hadoop.hbase.io.hfile.CacheConfig; @@ -45,7 +46,7 @@ import org.apache.hadoop.io.compress.Compressor; * Compression validation test. Checks compression is working. Be sure to run * on every node in your cluster. */ -@InterfaceAudience.Public +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Evolving public class CompressionTest { static final Log LOG = LogFactory.getLog(CompressionTest.class); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index bcda25c316b..9100d31c585 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -66,6 +66,7 @@ import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -177,7 +178,7 @@ import com.google.protobuf.ServiceException; * can be used to limit the kinds of repairs hbck will do. See the code in * {@link #printUsageAndExit()} for more details. 
*/ -@InterfaceAudience.Public +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Evolving public class HBaseFsck extends Configured { public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java index 4caa1807400..e8f073db034 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java @@ -19,6 +19,8 @@ package org.apache.hadoop.hbase.util; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -29,6 +31,7 @@ import org.apache.log4j.Logger; * Generate a classpath string containing any jars required by mapreduce jobs. Specify * additional values by providing a comma-separated list of paths via -Dtmpjars. */ +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class MapreduceDependencyClasspathTool implements Tool { private Configuration conf; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java index bc1bcbcd814..6002f29c1d0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java @@ -24,11 +24,11 @@ import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -53,8 +53,7 @@ import com.google.common.base.Preconditions; * Utility that can merge any two regions in the same table: adjacent, * overlapping or disjoint. 
*/ -@InterfaceAudience.Public -@InterfaceStability.Evolving +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class Merge extends Configured implements Tool { static final Log LOG = LogFactory.getLog(Merge.class); private Path rootdir; @@ -77,6 +76,7 @@ public class Merge extends Configured implements Tool { setConf(conf); } + @Override public int run(String[] args) throws Exception { if (parseArgs(args) != 0) { return -1; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRepair.java index 343be0d90d6..efc141aec46 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRepair.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRepair.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HBaseFsck; import org.apache.hadoop.io.MultipleIOException; @@ -36,13 +37,13 @@ import org.apache.hadoop.io.MultipleIOException; * are any problem detected, it will fail suggesting actions for the user to do * to "fix" problems. If it succeeds, it will backup the previous hbase:meta and * -ROOT- dirs and write new tables in place. - * + * * This is an advanced feature, so is only exposed for use if explicitly * mentioned. - * + * * hbase org.apache.hadoop.hbase.util.hbck.OfflineMetaRepair ... */ -@InterfaceAudience.Public +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Evolving public class OfflineMetaRepair { private static final Log LOG = LogFactory.getLog(OfflineMetaRepair.class.getName()); @@ -62,7 +63,7 @@ public class OfflineMetaRepair { /** * Main program - * + * * @param args * @throws Exception */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java index 280f731f81e..104faad3013 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java @@ -34,12 +34,14 @@ import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.commons.cli.PosixParser; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; @@ -51,17 +53,18 @@ import org.apache.hadoop.hbase.regionserver.wal.WALEdit; /** * WALPrettyPrinter prints the contents of a given WAL with a variety of * options affecting formatting and extent of content. - * + * * It targets two usage cases: pretty printing for ease of debugging directly by * humans, and JSON output for consumption by monitoring and/or maintenance * scripts. 
- * + * * It can filter by row, region, or sequence id. - * + * * It can also toggle output of values. - * + * */ -@InterfaceAudience.Private +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) +@InterfaceStability.Evolving public class WALPrettyPrinter { private boolean outputValues; private boolean outputJSON; @@ -93,7 +96,7 @@ public class WALPrettyPrinter { /** * Fully specified constructor. - * + * * @param outputValues * when true, enables output of values along with other log * information @@ -113,7 +116,7 @@ public class WALPrettyPrinter { * keeps a single list running for multiple files. if enabled, the * endPersistentOutput() method must be used! * @param out - * Specifies an alternative to stdout for the destination of this + * Specifies an alternative to stdout for the destination of this * PrettyPrinter's output. */ public WALPrettyPrinter(boolean outputValues, boolean outputJSON, @@ -162,7 +165,7 @@ public class WALPrettyPrinter { /** * sets the region by which output will be filtered - * + * * @param sequence * when nonnegative, serves as a filter; only log entries with this * sequence id will be printed @@ -173,7 +176,7 @@ public class WALPrettyPrinter { /** * sets the region by which output will be filtered - * + * * @param region * when not null, serves as a filter; only log entries from this * region will be printed @@ -184,7 +187,7 @@ public class WALPrettyPrinter { /** * sets the region by which output will be filtered - * + * * @param row * when not null, serves as a filter; only log entries from this row * will be printed @@ -221,7 +224,7 @@ public class WALPrettyPrinter { /** * reads a log file and outputs its contents, one transaction at a time, as * specified by the currently configured options - * + * * @param conf * the HBase configuration relevant to this log file * @param p @@ -339,7 +342,7 @@ public class WALPrettyPrinter { /** * Pass one or more log file names and formatting options and it will dump out * a text version of the contents on stdout. - * + * * @param args * Command line arguments * @throws IOException diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java index 29c201fbd64..73483da70aa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java @@ -23,16 +23,15 @@ import java.util.Properties; import java.util.Map.Entry; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; /** * Tool for reading ZooKeeper servers from HBase XML configuration and producing * a line-by-line list for use by bash scripts. */ -@InterfaceAudience.Public -@InterfaceStability.Evolving +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class ZKServerTool { /** * Run the tool. 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperMainServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperMainServer.java index 65e1460ed4e..86348a3e103 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperMainServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperMainServer.java @@ -27,6 +27,8 @@ import java.util.Properties; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.ZooKeeperMain; @@ -34,6 +36,7 @@ import org.apache.zookeeper.ZooKeeperMain; * Tool for running ZookeeperMain from HBase by reading a ZooKeeper server * from HBase XML configuration. */ +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class ZooKeeperMainServer { private static final String SERVER_ARG = "-server"; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java index f64c1515e00..848b6264155 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java @@ -28,6 +28,7 @@ import org.apache.commons.math.random.RandomDataImpl; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; @@ -39,6 +40,7 @@ import org.apache.hadoop.hbase.util.Bytes; /** * This class runs performance benchmarks for {@link HFile}. 
*/ +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class HFilePerformanceEvaluation { private static final int ROW_LENGTH = 10; private static final int ROW_COUNT = 1000000; @@ -91,6 +93,7 @@ public class HFilePerformanceEvaluation { runBenchmark(new SequentialWriteBenchmark(conf, fs, mf, ROW_COUNT), ROW_COUNT); PerformanceEvaluationCommons.concurrentReads(new Runnable() { + @Override public void run() { try { runBenchmark(new UniformRandomSmallScan(conf, fs, mf, ROW_COUNT), @@ -101,6 +104,7 @@ public class HFilePerformanceEvaluation { } }); PerformanceEvaluationCommons.concurrentReads(new Runnable() { + @Override public void run() { try { runBenchmark(new UniformRandomReadBenchmark(conf, fs, mf, ROW_COUNT), @@ -111,6 +115,7 @@ public class HFilePerformanceEvaluation { } }); PerformanceEvaluationCommons.concurrentReads(new Runnable() { + @Override public void run() { try { runBenchmark(new GaussianRandomReadBenchmark(conf, fs, mf, ROW_COUNT), @@ -121,6 +126,7 @@ public class HFilePerformanceEvaluation { } }); PerformanceEvaluationCommons.concurrentReads(new Runnable() { + @Override public void run() { try { runBenchmark(new SequentialReadBenchmark(conf, fs, mf, ROW_COUNT), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MapFilePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MapFilePerformanceEvaluation.java index e4ed1a81cfe..fd829276a29 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MapFilePerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MapFilePerformanceEvaluation.java @@ -37,7 +37,9 @@ import org.apache.hadoop.io.WritableComparable; *

 * This class runs performance benchmarks for {@link MapFile}.
 * </p>
    + * @deprecated HBase does not use MapFiles any more. */ +@Deprecated public class MapFilePerformanceEvaluation { protected final Configuration conf; private static final int ROW_LENGTH = 10; @@ -70,6 +72,7 @@ public class MapFilePerformanceEvaluation { ROW_COUNT); PerformanceEvaluationCommons.concurrentReads(new Runnable() { + @Override public void run() { try { runBenchmark(new UniformRandomSmallScan(conf, fs, mf, ROW_COUNT), @@ -80,6 +83,7 @@ public class MapFilePerformanceEvaluation { } }); PerformanceEvaluationCommons.concurrentReads(new Runnable() { + @Override public void run() { try { runBenchmark(new UniformRandomReadBenchmark(conf, fs, mf, ROW_COUNT), @@ -90,6 +94,7 @@ public class MapFilePerformanceEvaluation { } }); PerformanceEvaluationCommons.concurrentReads(new Runnable() { + @Override public void run() { try { runBenchmark(new GaussianRandomReadBenchmark(conf, fs, mf, ROW_COUNT), @@ -100,6 +105,7 @@ public class MapFilePerformanceEvaluation { } }); PerformanceEvaluationCommons.concurrentReads(new Runnable() { + @Override public void run() { try { runBenchmark(new SequentialReadBenchmark(conf, fs, mf, ROW_COUNT), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java index 08b09465f4a..7524d5c993c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java @@ -47,6 +47,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -113,6 +114,7 @@ import com.yammer.metrics.stats.UniformSample; * specifying {@code --nomapred}. Each client does about 1GB of data, unless * specified otherwise. */ +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class PerformanceEvaluation extends Configured implements Tool { protected static final Log LOG = LogFactory.getLog(PerformanceEvaluation.class.getName()); private static final ObjectMapper MAPPER = new ObjectMapper(); @@ -657,7 +659,7 @@ public class PerformanceEvaluation extends Configured implements Tool { public void setCycles(final int cycles) { this.cycles = cycles; } - + public boolean isValueZipf() { return valueZipf; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java index dfafa836c85..6b8e63d668e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java @@ -26,6 +26,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -53,6 +54,7 @@ import com.google.common.base.Stopwatch; * A simple performance evaluation tool for single client and MR scans * and snapshot scans. 
*/ +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class ScanPerformanceEvaluation extends AbstractHBaseTool { private static final String HBASE_COUNTER_GROUP_NAME = "HBase Counters"; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java index 45ddddb098b..90e07b352f4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java @@ -35,11 +35,13 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.HBaseAdmin; @@ -63,6 +65,7 @@ import org.apache.hadoop.util.ToolRunner; * {@link PerformanceEvaluation}, this tool validates the data written, * and supports simultaneously writing and reading the same set of keys. */ +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class LoadTestTool extends AbstractHBaseTool { private static final Log LOG = LogFactory.getLog(LoadTestTool.class); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java index 2e3613d63dc..052c9e12ddf 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java @@ -31,6 +31,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.http.InfoServer; import org.apache.hadoop.hbase.thrift.ThriftServerRunner.ImplType; import org.apache.hadoop.hbase.util.VersionInfo; @@ -41,7 +42,7 @@ import org.apache.hadoop.util.Shell.ExitCodeException; * Hbase API specified in the Hbase.thrift IDL file. The server runs in an * independent process. 
*/ -@InterfaceAudience.Private +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class ThriftServer { private static final Log LOG = LogFactory.getLog(ThriftServer.class); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java index 7171bb1c5ff..f79276c8e3d 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java @@ -50,6 +50,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.filter.ParseFilter; import org.apache.hadoop.hbase.http.InfoServer; import org.apache.hadoop.hbase.security.SecurityUtil; @@ -88,7 +89,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; * ThriftServer - this class starts up a Thrift server which implements the HBase API specified in the * HbaseClient.thrift IDL file. */ -@InterfaceAudience.Private +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @SuppressWarnings({ "rawtypes", "unchecked" }) public class ThriftServer { private static final Log log = LogFactory.getLog(ThriftServer.class); @@ -106,7 +107,7 @@ public class ThriftServer { public static final int DEFAULT_LISTEN_PORT = 9090; - + public ThriftServer() { }
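
For context, and not part of the patch itself: the sketch below shows how a new command-line utility would pick up the TOOLS audience string that this patch adds to HBaseInterfaceAudience. The class name ExampleTool is hypothetical; the annotations, Configured/Tool, ToolRunner, and HBaseConfiguration are the same APIs the annotated classes in this patch already use. Per the Javadoc added for TOOLS, LimitedPrivate(TOOLS) promises compatibility only for the class name and its command-line arguments, not for the class's Java API.

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * Hypothetical example (not in the patch): a command-line tool exposed to operators
 * but not to client code, so it is LimitedPrivate with the TOOLS audience.
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
@InterfaceStability.Evolving
public class ExampleTool extends Configured implements Tool {

  @Override
  public int run(String[] args) throws Exception {
    // A real tool would parse args and do its work here; this sketch only reports success.
    return 0;
  }

  public static void main(String[] args) throws Exception {
    // ToolRunner wires Hadoop's generic options (-D, -conf, ...) into the tool before run().
    int exitCode = ToolRunner.run(HBaseConfiguration.create(), new ExampleTool(), args);
    System.exit(exitCode);
  }
}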