From e79eefe78badfdd204fc796dc55f62fafd376ccc Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Wed, 10 Jul 2013 18:50:21 +0000 Subject: [PATCH] HBASE-8918 Removes redundant identifiers from interfaces; REVERT -- PREMATURE APPLICATION git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1501909 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/hadoop/hbase/Abortable.java | 4 +- .../org/apache/hadoop/hbase/Coprocessor.java | 12 +- .../hadoop/hbase/CoprocessorEnvironment.java | 14 +- .../java/org/apache/hadoop/hbase/Server.java | 8 +- .../org/apache/hadoop/hbase/Stoppable.java | 4 +- .../hadoop/hbase/catalog/MetaReader.java | 2 +- .../hadoop/hbase/client/AsyncProcess.java | 2 +- .../hadoop/hbase/client/Attributes.java | 6 +- .../hbase/client/ClusterStatusListener.java | 8 +- .../hadoop/hbase/client/HConnection.java | 98 +++++------ .../hadoop/hbase/client/HTableInterface.java | 24 ++- .../MasterAdminKeepAliveConnection.java | 4 +- .../hadoop/hbase/client/MetaScanner.java | 2 +- .../hadoop/hbase/client/ResultScanner.java | 6 +- .../org/apache/hadoop/hbase/client/Row.java | 4 +- .../hbase/client/coprocessor/Batch.java | 10 +- .../hbase/replication/ReplicationPeers.java | 30 ++-- .../hbase/replication/ReplicationQueues.java | 24 +-- .../replication/ReplicationQueuesClient.java | 8 +- .../org/apache/hadoop/hbase/util/PoolMap.java | 12 +- .../hadoop/hbase/CompoundConfiguration.java | 2 +- .../org/apache/hadoop/hbase/KeyValue.java | 8 +- .../org/apache/hadoop/hbase/codec/Codec.java | 4 +- .../org/apache/hadoop/hbase/io/HeapSize.java | 4 +- .../hbase/io/encoding/DataBlockEncoder.java | 55 +++--- .../encoding/HFileBlockDecodingContext.java | 11 +- .../encoding/HFileBlockEncodingContext.java | 18 +- .../org/apache/hadoop/hbase/util/Bytes.java | 5 +- .../org/apache/hadoop/hbase/ClassFinder.java | 12 +- .../java/org/apache/hadoop/hbase/Waiter.java | 4 +- .../hbase/ipc/MetricsHBaseServerSource.java | 52 +++--- .../hbase/master/MetricsMasterSource.java | 80 ++++----- .../hadoop/hbase/metrics/BaseSource.java | 2 +- .../MetricsRegionAggregateSource.java | 8 +- .../MetricsRegionServerSource.java | 166 +++++++++--------- .../MetricsRegionServerWrapper.java | 10 +- .../regionserver/MetricsRegionSource.java | 4 +- .../wal/MetricsEditsReplaySource.java | 20 +-- .../regionserver/wal/MetricsWALSource.java | 28 +-- .../MetricsReplicationSource.java | 8 +- .../hadoop/hbase/rest/MetricsRESTSource.java | 22 +-- .../thrift/MetricsThriftServerSource.java | 12 +- .../MetricsThriftServerSourceFactory.java | 12 +- .../hadoop/metrics2/MetricHistogram.java | 16 +- .../org/apache/hadoop/hbase/HadoopShims.java | 2 +- .../hbase/test/MetricsAssertHelper.java | 28 +-- .../prefixtree/builder/TestTokenizerData.java | 2 +- .../prefixtree/column/TestColumnData.java | 2 +- .../codec/prefixtree/row/TestRowData.java | 2 +- .../timestamp/TestTimestampData.java | 2 +- .../apache/hadoop/hbase/InterProcessLock.java | 16 +- .../hbase/InterProcessReadWriteLock.java | 4 +- .../apache/hadoop/hbase/TableDescriptors.java | 10 +- .../hadoop/hbase/constraint/Constraint.java | 2 +- .../hbase/coprocessor/CoprocessorService.java | 2 +- .../RegionCoprocessorEnvironment.java | 6 +- .../WALCoprocessorEnvironment.java | 2 +- .../ForeignExceptionListener.java | 4 +- .../errorhandling/ForeignExceptionSnare.java | 6 +- .../hadoop/hbase/executor/EventHandler.java | 4 +- .../apache/hadoop/hbase/fs/HFileSystem.java | 4 +- .../hadoop/hbase/io/WritableWithSize.java | 2 +- .../hadoop/hbase/io/hfile/BlockCache.java | 26 +-- 
.../hadoop/hbase/io/hfile/Cacheable.java | 8 +- .../hbase/io/hfile/CacheableDeserializer.java | 6 +- .../hbase/io/hfile/HFileDataBlockEncoder.java | 35 ++-- .../hadoop/hbase/io/hfile/HFileScanner.java | 30 ++-- .../apache/hadoop/hbase/ipc/Delayable.java | 14 +- .../hbase/ipc/HBaseRPCErrorHandler.java | 2 +- .../hadoop/hbase/ipc/RpcServerInterface.java | 4 +- .../hbase/master/AssignmentManager.java | 4 +- .../hbase/master/ClusterStatusPublisher.java | 8 +- .../hadoop/hbase/master/LoadBalancer.java | 26 +-- .../hadoop/hbase/master/MasterServices.java | 43 +++-- .../hadoop/hbase/master/SnapshotSentinel.java | 12 +- .../hadoop/hbase/master/SplitLogManager.java | 6 +- .../hadoop/hbase/master/TableLockManager.java | 6 +- .../master/cleaner/FileCleanerDelegate.java | 2 +- .../master/handler/TotesHRegionInfo.java | 2 +- .../hbase/monitoring/MonitoredRPCHandler.java | 22 +-- .../hbase/monitoring/MonitoredTask.java | 36 ++-- .../procedure/ProcedureCoordinatorRpcs.java | 10 +- .../hbase/procedure/ProcedureMemberRpcs.java | 12 +- .../hbase/procedure/SubprocedureFactory.java | 2 +- .../hbase/regionserver/ColumnTracker.java | 20 +-- .../regionserver/CompactionRequestor.java | 22 +-- .../hbase/regionserver/DeleteTracker.java | 18 +- .../hadoop/hbase/regionserver/HRegion.java | 2 +- .../hbase/regionserver/InternalScanner.java | 6 +- .../hbase/regionserver/KeyValueScanner.java | 25 ++- .../hbase/regionserver/LastSequenceId.java | 2 +- .../hbase/regionserver/LeaseListener.java | 2 +- .../hbase/regionserver/OnlineRegions.java | 8 +- .../hbase/regionserver/RegionScanner.java | 14 +- .../regionserver/RegionServerServices.java | 28 +-- .../regionserver/ReplicationService.java | 9 +- .../regionserver/ReplicationSinkService.java | 4 +- .../ReplicationSourceService.java | 4 +- .../hbase/regionserver/SplitLogWorker.java | 6 +- .../hadoop/hbase/regionserver/Store.java | 122 ++++++------- .../regionserver/StoreConfigInformation.java | 8 +- .../hbase/regionserver/StoreFileManager.java | 34 ++-- .../hbase/regionserver/wal/Dictionary.java | 10 +- .../hadoop/hbase/regionserver/wal/HLog.java | 87 ++++----- .../regionserver/wal/WALActionsListener.java | 22 ++- .../ReplicationSourceInterface.java | 28 ++- .../apache/hadoop/hbase/rest/Constants.java | 22 +-- .../hbase/rest/ProtobufMessageHandler.java | 6 +- .../CodeToClassAndBackFor96Migration.java | 4 +- .../hbase/thrift/IncrementCoalescerMBean.java | 28 +-- .../org/apache/hadoop/hbase/tool/Canary.java | 6 +- .../hbase/util/CancelableProgressable.java | 2 +- .../apache/hadoop/hbase/util/HBaseFsck.java | 36 ++-- .../apache/hadoop/hbase/util/KeyRange.java | 4 +- .../hadoop/hbase/util/ModifyRegionUtils.java | 2 +- .../hadoop/hbase/util/RegionSplitter.java | 4 +- .../apache/hadoop/hbase/HBaseTestCase.java | 21 ++- .../hadoop/hbase/PerformanceEvaluation.java | 2 +- .../hbase/io/hfile/RandomDistribution.java | 4 +- .../hbase/master/TestSplitLogManager.java | 2 +- .../hbase/rest/PerformanceEvaluation.java | 2 +- .../token/TestTokenAuthentication.java | 2 +- 122 files changed, 903 insertions(+), 986 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java index 83b670351ac..a88cf31ccd4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java @@ -35,11 +35,11 @@ public interface Abortable { * @param why Why we're aborting. * @param e Throwable that caused abort. Can be null. 
*/ - void abort(String why, Throwable e); + public void abort(String why, Throwable e); /** * Check if the server or client was aborted. * @return true if the server or client was aborted, false otherwise */ - boolean isAborted(); + public boolean isAborted(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java index e3e23508a1c..e097d8f5730 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java @@ -26,21 +26,21 @@ import java.io.IOException; @InterfaceAudience.Public @InterfaceStability.Evolving public interface Coprocessor { - int VERSION = 1; + static final int VERSION = 1; /** Highest installation priority */ - int PRIORITY_HIGHEST = 0; + static final int PRIORITY_HIGHEST = 0; /** High (system) installation priority */ - int PRIORITY_SYSTEM = Integer.MAX_VALUE / 4; + static final int PRIORITY_SYSTEM = Integer.MAX_VALUE / 4; /** Default installation priority for user coprocessors */ - int PRIORITY_USER = Integer.MAX_VALUE / 2; + static final int PRIORITY_USER = Integer.MAX_VALUE / 2; /** Lowest installation priority */ - int PRIORITY_LOWEST = Integer.MAX_VALUE; + static final int PRIORITY_LOWEST = Integer.MAX_VALUE; /** * Lifecycle state of a given coprocessor instance. */ - enum State { + public enum State { UNINSTALLED, INSTALLED, STARTING, diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java index 30d7ff6295c..9b1e1e50de3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java @@ -30,26 +30,26 @@ import java.io.IOException; public interface CoprocessorEnvironment { /** @return the Coprocessor interface version */ - int getVersion(); + public int getVersion(); /** @return the HBase version as a string (e.g. "0.21.0") */ - String getHBaseVersion(); + public String getHBaseVersion(); /** @return the loaded coprocessor instance */ - Coprocessor getInstance(); + public Coprocessor getInstance(); /** @return the priority assigned to the loaded coprocessor */ - int getPriority(); + public int getPriority(); /** @return the load sequence number */ - int getLoadSequence(); + public int getLoadSequence(); /** @return the configuration */ - Configuration getConfiguration(); + public Configuration getConfiguration(); /** * @return an interface for accessing the given table * @throws IOException */ - HTableInterface getTable(byte[] tableName) throws IOException; + public HTableInterface getTable(byte[] tableName) throws IOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/Server.java index e506e5b7677..2106710c56e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/Server.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Server.java @@ -32,20 +32,20 @@ public interface Server extends Abortable, Stoppable { /** * Gets the configuration object for this server. */ - Configuration getConfiguration(); + public Configuration getConfiguration(); /** * Gets the ZooKeeper instance for this server. 
*/ - ZooKeeperWatcher getZooKeeper(); + public ZooKeeperWatcher getZooKeeper(); /** * @return Master's instance of {@link CatalogTracker} */ - CatalogTracker getCatalogTracker(); + public CatalogTracker getCatalogTracker(); /** * @return The unique server name for this server. */ - ServerName getServerName(); + public ServerName getServerName(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/Stoppable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/Stoppable.java index a7d2aebcf69..93ccc13233f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/Stoppable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Stoppable.java @@ -29,10 +29,10 @@ public interface Stoppable { * Stop this service. * @param why Why we're stopping. */ - void stop(String why); + public void stop(String why); /** * @return True if {@link #stop(String)} has been closed. */ - boolean isStopped(); + public boolean isStopped(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java index 243719808e9..ffbc3817c0f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java @@ -558,7 +558,7 @@ public class MetaReader { * @return True if we are to proceed scanning the table, else false if * we are to stop now. */ - boolean visit(final Result r) throws IOException; + public boolean visit(final Result r) throws IOException; } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java index 58bdd2650d4..e0a4c1d1f22 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java @@ -117,7 +117,7 @@ class AsyncProcess { * * */ - interface AsyncProcessCallback { + static interface AsyncProcessCallback { /** * Called on success. originalIndex holds the index in the action list. 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Attributes.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Attributes.java index b1307780bdb..181a04b72de 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Attributes.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Attributes.java @@ -34,18 +34,18 @@ public interface Attributes { * @param name attribute name * @param value attribute value */ - void setAttribute(String name, byte[] value); + public void setAttribute(String name, byte[] value); /** * Gets an attribute * @param name attribute name * @return attribute value if attribute is set, null otherwise */ - byte[] getAttribute(String name); + public byte[] getAttribute(String name); /** * Gets all attributes * @return unmodifiable map of all attributes */ - Map getAttributesMap(); + public Map getAttributesMap(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java index 0536862b44a..f6aa5b5d123 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java @@ -82,19 +82,19 @@ class ClusterStatusListener implements Closeable { * * @param sn - the server name */ - void newDead(ServerName sn); + public void newDead(ServerName sn); } /** * The interface to be implented by a listener of a cluster status event. */ - interface Listener extends Closeable { + static interface Listener extends Closeable { /** * Called to close the resources, if any. Cannot throw an exception. */ @Override - void close(); + public void close(); /** * Called to connect. @@ -102,7 +102,7 @@ class ClusterStatusListener implements Closeable { * @param conf Configuration to use. * @throws IOException */ - void connect(Configuration conf) throws IOException; + public void connect(Configuration conf) throws IOException; } public ClusterStatusListener(DeadServerHandler dsh, Configuration conf, diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java index 1e6356f1490..3ae7e21a041 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java @@ -62,10 +62,10 @@ public interface HConnection extends Abortable, Closeable { /** * @return Configuration instance being used by this HConnection instance. 
*/ - Configuration getConfiguration(); + public Configuration getConfiguration(); /** @return - true if the master server is running */ - boolean isMasterRunning() + public boolean isMasterRunning() throws MasterNotRunningException, ZooKeeperConnectionException; /** @@ -76,21 +76,21 @@ public interface HConnection extends Abortable, Closeable { * @return true if the table is enabled, false otherwise * @throws IOException if a remote or network exception occurs */ - boolean isTableEnabled(byte[] tableName) throws IOException; + public boolean isTableEnabled(byte[] tableName) throws IOException; /** * @param tableName table name * @return true if the table is disabled, false otherwise * @throws IOException if a remote or network exception occurs */ - boolean isTableDisabled(byte[] tableName) throws IOException; + public boolean isTableDisabled(byte[] tableName) throws IOException; /** * @param tableName table name * @return true if all regions of the table are available, false otherwise * @throws IOException if a remote or network exception occurs */ - boolean isTableAvailable(byte[] tableName) throws IOException; + public boolean isTableAvailable(byte[] tableName) throws IOException; /** * Use this api to check if the table has been created with the specified number of @@ -104,7 +104,7 @@ public interface HConnection extends Abortable, Closeable { * @throws IOException * if a remote or network exception occurs */ - boolean isTableAvailable(byte[] tableName, byte[][] splitKeys) throws IOException; + public boolean isTableAvailable(byte[] tableName, byte[][] splitKeys) throws IOException; /** * List all the userspace tables. In other words, scan the META table. @@ -116,14 +116,14 @@ public interface HConnection extends Abortable, Closeable { * @return - returns an array of HTableDescriptors * @throws IOException if a remote or network exception occurs */ - HTableDescriptor[] listTables() throws IOException; + public HTableDescriptor[] listTables() throws IOException; /** * @param tableName table name * @return table metadata * @throws IOException if a remote or network exception occurs */ - HTableDescriptor getHTableDescriptor(byte[] tableName) + public HTableDescriptor getHTableDescriptor(byte[] tableName) throws IOException; /** @@ -135,15 +135,14 @@ public interface HConnection extends Abortable, Closeable { * question * @throws IOException if a remote or network exception occurs */ - HRegionLocation locateRegion( - final byte[] tableName, final byte[] row - ) + public HRegionLocation locateRegion(final byte [] tableName, + final byte [] row) throws IOException; /** * Allows flushing the region cache. */ - void clearRegionCache(); + public void clearRegionCache(); /** * Allows flushing the region cache of all locations that pertain to @@ -151,13 +150,13 @@ public interface HConnection extends Abortable, Closeable { * @param tableName Name of the table whose regions we are to remove from * cache. */ - void clearRegionCache(final byte[] tableName); + public void clearRegionCache(final byte [] tableName); /** * Deletes cached locations for the specific region. * @param location The location object for the region, to be purged from cache. 
*/ - void deleteCachedRegionLocation(final HRegionLocation location); + public void deleteCachedRegionLocation(final HRegionLocation location); /** * Find the location of the region of tableName that row @@ -168,9 +167,8 @@ public interface HConnection extends Abortable, Closeable { * question * @throws IOException if a remote or network exception occurs */ - HRegionLocation relocateRegion( - final byte[] tableName, final byte[] row - ) + public HRegionLocation relocateRegion(final byte [] tableName, + final byte [] row) throws IOException; /** @@ -181,9 +179,8 @@ public interface HConnection extends Abortable, Closeable { * @param exception the exception if any. Can be null. * @param source the previous location */ - void updateCachedLocations( - byte[] tableName, byte[] rowkey, Object exception, HRegionLocation source - ); + public void updateCachedLocations(byte[] tableName, byte[] rowkey, + Object exception, HRegionLocation source); /** * Gets the location of the region of regionName. @@ -192,7 +189,7 @@ public interface HConnection extends Abortable, Closeable { * question * @throws IOException if a remote or network exception occurs */ - HRegionLocation locateRegion(final byte[] regionName) + public HRegionLocation locateRegion(final byte [] regionName) throws IOException; /** @@ -201,7 +198,7 @@ public interface HConnection extends Abortable, Closeable { * @return list of region locations for all regions of table * @throws IOException */ - List locateRegions(final byte[] tableName) + public List locateRegions(final byte[] tableName) throws IOException; /** @@ -213,19 +210,18 @@ public interface HConnection extends Abortable, Closeable { * @return list of region locations for all regions of table * @throws IOException */ - List locateRegions( - final byte[] tableName, final boolean useCache, final boolean offlined - ) throws IOException; + public List locateRegions(final byte[] tableName, final boolean useCache, + final boolean offlined) throws IOException; /** * Returns a {@link MasterAdminKeepAliveConnection} to the active master */ - MasterAdminService.BlockingInterface getMasterAdmin() throws IOException; + public MasterAdminService.BlockingInterface getMasterAdmin() throws IOException; /** * Returns an {@link MasterMonitorKeepAliveConnection} to the active master */ - MasterMonitorService.BlockingInterface getMasterMonitor() throws IOException; + public MasterMonitorService.BlockingInterface getMasterMonitor() throws IOException; /** * Establishes a connection to the region server at the specified address. @@ -233,7 +229,7 @@ public interface HConnection extends Abortable, Closeable { * @return proxy for HRegionServer * @throws IOException if a remote or network exception occurs */ - AdminService.BlockingInterface getAdmin(final ServerName serverName) throws IOException; + public AdminService.BlockingInterface getAdmin(final ServerName serverName) throws IOException; /** * Establishes a connection to the region server at the specified address, and returns @@ -244,7 +240,7 @@ public interface HConnection extends Abortable, Closeable { * @throws IOException if a remote or network exception occurs * */ - ClientService.BlockingInterface getClient(final ServerName serverName) throws IOException; + public ClientService.BlockingInterface getClient(final ServerName serverName) throws IOException; /** * Establishes a connection to the region server at the specified address. 
@@ -254,7 +250,7 @@ public interface HConnection extends Abortable, Closeable { * @throws IOException if a remote or network exception occurs * @deprecated You can pass master flag but nothing special is done. */ - AdminService.BlockingInterface getAdmin(final ServerName serverName, boolean getMaster) + public AdminService.BlockingInterface getAdmin(final ServerName serverName, boolean getMaster) throws IOException; /** @@ -281,7 +277,7 @@ public interface HConnection extends Abortable, Closeable { * @throws RuntimeException other unspecified error */ @Deprecated - T getRegionServerWithRetries(ServerCallable callable) + public T getRegionServerWithRetries(ServerCallable callable) throws IOException, RuntimeException; /** @@ -294,7 +290,7 @@ public interface HConnection extends Abortable, Closeable { * @throws RuntimeException other unspecified error */ @Deprecated - T getRegionServerWithoutRetries(ServerCallable callable) + public T getRegionServerWithoutRetries(ServerCallable callable) throws IOException, RuntimeException; /** @@ -313,9 +309,8 @@ public interface HConnection extends Abortable, Closeable { * @deprecated since 0.96 - Use {@link HTableInterface#batch} instead */ @Deprecated - void processBatch( - List actions, final byte[] tableName, ExecutorService pool, Object[] results - ) + public void processBatch(List actions, final byte[] tableName, + ExecutorService pool, Object[] results) throws IOException, InterruptedException; /** @@ -324,13 +319,11 @@ public interface HConnection extends Abortable, Closeable { * @deprecated since 0.96 - Use {@link HTableInterface#batchCallback} instead */ @Deprecated - void processBatchCallback( - List list, - byte[] tableName, - ExecutorService pool, - Object[] results, - Batch.Callback callback - ) throws IOException, InterruptedException; + public void processBatchCallback(List list, + byte[] tableName, + ExecutorService pool, + Object[] results, + Batch.Callback callback) throws IOException, InterruptedException; /** * Enable or disable region cache prefetch for the table. It will be @@ -339,9 +332,8 @@ public interface HConnection extends Abortable, Closeable { * @param tableName name of table to configure. * @param enable Set to true to enable region cache prefetch. */ - void setRegionCachePrefetch( - final byte[] tableName, final boolean enable - ); + public void setRegionCachePrefetch(final byte[] tableName, + final boolean enable); /** * Check whether region cache prefetch is enabled or not. @@ -349,34 +341,34 @@ public interface HConnection extends Abortable, Closeable { * @return true if table's region cache prefetch is enabled. Otherwise * it is disabled. */ - boolean getRegionCachePrefetch(final byte[] tableName); + public boolean getRegionCachePrefetch(final byte[] tableName); /** * @return the number of region servers that are currently running * @throws IOException if a remote or network exception occurs * @deprecated This method will be changed from public to package protected. */ - int getCurrentNrHRS() throws IOException; + public int getCurrentNrHRS() throws IOException; /** * @param tableNames List of table names * @return HTD[] table metadata * @throws IOException if a remote or network exception occurs */ - HTableDescriptor[] getHTableDescriptors(List tableNames) + public HTableDescriptor[] getHTableDescriptors(List tableNames) throws IOException; /** * @return true if this connection is closed */ - boolean isClosed(); + public boolean isClosed(); /** * Clear any caches that pertain to server name sn. 
* @param sn A server name */ - void clearCaches(final ServerName sn); + public void clearCaches(final ServerName sn); /** * This function allows HBaseAdmin and potentially others to get a shared MasterMonitor @@ -385,7 +377,7 @@ public interface HConnection extends Abortable, Closeable { * @throws MasterNotRunningException */ // TODO: Why is this in the public interface when the returned type is shutdown package access? - MasterMonitorKeepAliveConnection getKeepAliveMasterMonitorService() + public MasterMonitorKeepAliveConnection getKeepAliveMasterMonitorService() throws MasterNotRunningException; /** @@ -395,11 +387,11 @@ public interface HConnection extends Abortable, Closeable { * @throws MasterNotRunningException */ // TODO: Why is this in the public interface when the returned type is shutdown package access? - MasterAdminKeepAliveConnection getKeepAliveMasterAdminService() throws MasterNotRunningException; + public MasterAdminKeepAliveConnection getKeepAliveMasterAdminService() throws MasterNotRunningException; /** * @param serverName * @return true if the server is known as dead, false otherwise. */ - boolean isDeadServer(ServerName serverName); + public boolean isDeadServer(ServerName serverName); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java index 889f50ba5eb..f66206a5e7f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java @@ -127,9 +127,8 @@ public interface HTableInterface extends Closeable { * Same as {@link #batch(List, Object[])}, but with a callback. * @since 0.96.0 */ - void batchCallback( - final List actions, final Object[] results, final Batch.Callback callback - ) + public void batchCallback( + final List actions, final Object[] results, final Batch.Callback callback) throws IOException, InterruptedException; @@ -137,9 +136,8 @@ public interface HTableInterface extends Closeable { * Same as {@link #batch(List)}, but with a callback. * @since 0.96.0 */ - Object[] batchCallback( - List actions, Batch.Callback callback - ) throws IOException, + public Object[] batchCallback( + List actions, Batch.Callback callback) throws IOException, InterruptedException; /** @@ -311,7 +309,7 @@ public interface HTableInterface extends Closeable { * @param rm object that specifies the set of mutations to perform atomically * @throws IOException */ - void mutateRow(final RowMutations rm) throws IOException; + public void mutateRow(final RowMutations rm) throws IOException; /** * Appends values to one or more columns within a single row. @@ -326,7 +324,7 @@ public interface HTableInterface extends Closeable { * @throws IOException e * @return values of columns after the append operation (maybe null) */ - Result append(final Append append) throws IOException; + public Result append(final Append append) throws IOException; /** * Increments one or more columns within a single row. 
@@ -341,7 +339,7 @@ public interface HTableInterface extends Closeable { * @throws IOException e * @return values of columns after the increment */ - Result increment(final Increment increment) throws IOException; + public Result increment(final Increment increment) throws IOException; /** * See {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)} @@ -495,7 +493,7 @@ public interface HTableInterface extends Closeable { * @param autoFlush * Whether or not to enable 'auto-flush'. */ - void setAutoFlush(boolean autoFlush); + public void setAutoFlush(boolean autoFlush); /** * Turns 'auto-flush' on or off. @@ -524,7 +522,7 @@ public interface HTableInterface extends Closeable { * Whether to keep Put failures in the writeBuffer * @see #flushCommits */ - void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail); + public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail); /** * Returns the maximum size in bytes of the write buffer for this HTable. @@ -533,7 +531,7 @@ public interface HTableInterface extends Closeable { * {@code hbase.client.write.buffer}. * @return The size of the write buffer in bytes. */ - long getWriteBufferSize(); + public long getWriteBufferSize(); /** * Sets the size of the buffer in bytes. @@ -543,5 +541,5 @@ public interface HTableInterface extends Closeable { * @param writeBufferSize The new write buffer size, in bytes. * @throws IOException if a remote or network exception occurs. */ - void setWriteBufferSize(long writeBufferSize) throws IOException; + public void setWriteBufferSize(long writeBufferSize) throws IOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterAdminKeepAliveConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterAdminKeepAliveConnection.java index 6354bf9b55d..7c361197ac1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterAdminKeepAliveConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterAdminKeepAliveConnection.java @@ -40,5 +40,5 @@ extends MasterAdminProtos.MasterAdminService.BlockingInterface { */ // The Closeable Interface wants to throw an IOE out of a close. // Thats a PITA. Do this below instead of Closeable. - void close(); -} + public void close(); +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java index d62e44f6651..bdcf32638cb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java @@ -297,7 +297,7 @@ public class MetaScanner { * @return A boolean to know if it should continue to loop in the region * @throws IOException e */ - boolean processRow(Result rowResult) throws IOException; + public boolean processRow(Result rowResult) throws IOException; } public static abstract class MetaScannerVisitorBase implements MetaScannerVisitor { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java index aad84034e1b..560b33c8a69 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java @@ -38,17 +38,17 @@ public interface ResultScanner extends Closeable, Iterable { * exhausted. 
* @throws IOException e */ - Result next() throws IOException; + public Result next() throws IOException; /** * @param nbRows number of rows to return * @return Between zero and nbRows Results * @throws IOException e */ - Result [] next(int nbRows) throws IOException; + public Result [] next(int nbRows) throws IOException; /** * Closes the scanner and releases any resources it has allocated */ - void close(); + public void close(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java index e84ceee7827..06fe5e40891 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java @@ -30,5 +30,5 @@ public interface Row extends Comparable { /** * @return The row. */ - byte [] getRow(); -} + public byte [] getRow(); +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java index 65f4295598a..5186da8ff98 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java @@ -50,8 +50,8 @@ public abstract class Batch { * {@link Batch.Call#call(Object)} * @param the return type from {@link Batch.Call#call(Object)} */ - public interface Call { - R call(T instance) throws IOException; + public static interface Call { + public R call(T instance) throws IOException; } /** @@ -68,7 +68,7 @@ public abstract class Batch { * @param the return type from the associated {@link Batch.Call#call(Object)} * @see org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call) */ - public interface Callback { - void update(byte[] region, byte[] row, R result); + public static interface Callback { + public void update(byte[] region, byte[] row, R result); } -} +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java index bfd09910409..7ec77c4b8e3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java @@ -46,7 +46,7 @@ public interface ReplicationPeers { * Initialize the ReplicationPeers interface. * @throws KeeperException */ - void init() throws IOException, KeeperException; + public void init() throws IOException, KeeperException; /** * Add a new remote slave cluster for replication. @@ -54,65 +54,65 @@ public interface ReplicationPeers { * @param clusterKey the concatenation of the slave cluster's: * hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent */ - void addPeer(String peerId, String clusterKey) throws IOException; + public void addPeer(String peerId, String clusterKey) throws IOException; /** * Removes a remote slave cluster and stops the replication to it. * @param peerId a short that identifies the cluster */ - void removePeer(String peerId) throws IOException; + public void removePeer(String peerId) throws IOException; /** * Restart the replication to the specified remote slave cluster. 
* @param peerId a short that identifies the cluster */ - void enablePeer(String peerId) throws IOException; + public void enablePeer(String peerId) throws IOException; /** * Stop the replication to the specified remote slave cluster. * @param peerId a short that identifies the cluster */ - void disablePeer(String peerId) throws IOException; + public void disablePeer(String peerId) throws IOException; /** * Get the replication status for the specified connected remote slave cluster. * @param peerId a short that identifies the cluster * @return true if replication is enabled, false otherwise. */ - boolean getStatusOfConnectedPeer(String peerId); + public boolean getStatusOfConnectedPeer(String peerId); /** * Get a set of all connected remote slave clusters. * @return set of peer ids */ - Set getConnectedPeers(); + public Set getConnectedPeers(); /** * List the cluster keys of all remote slave clusters (whether they are enabled/disabled or * connected/disconnected). * @return A map of peer ids to peer cluster keys */ - Map getAllPeerClusterKeys(); + public Map getAllPeerClusterKeys(); /** * List the peer ids of all remote slave clusters (whether they are enabled/disabled or * connected/disconnected). * @return A list of peer ids */ - List getAllPeerIds(); + public List getAllPeerIds(); /** * Attempt to connect to a new remote slave cluster. * @param peerId a short that identifies the cluster * @return true if a new connection was made, false if no new connection was made. */ - boolean connectToPeer(String peerId) throws IOException, KeeperException; + public boolean connectToPeer(String peerId) throws IOException, KeeperException; /** * Disconnect from a remote slave cluster. * @param peerId a short that identifies the cluster */ - void disconnectFromPeer(String peerId); + public void disconnectFromPeer(String peerId); /** * Returns all region servers from given connected remote slave cluster. @@ -120,19 +120,19 @@ public interface ReplicationPeers { * @return addresses of all region servers in the peer cluster. Returns an empty list if the peer * cluster is unavailable or there are no region servers in the cluster. */ - List getRegionServersOfConnectedPeer(String peerId); + public List getRegionServersOfConnectedPeer(String peerId); /** * Returns the UUID of the provided peer id. * @param peerId the peer's ID that will be converted into a UUID * @return a UUID or null if the peer cluster does not exist or is not connected. */ - UUID getPeerUUID(String peerId); + public UUID getPeerUUID(String peerId); /** * Returns the configuration needed to talk to the remote slave cluster. * @param peerId a short that identifies the cluster * @return the configuration for the peer cluster, null if it was unable to get the configuration */ - Configuration getPeerConf(String peerId) throws KeeperException; -} + public Configuration getPeerConf(String peerId) throws KeeperException; +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java index d6410887193..f7fb89f23a1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java @@ -37,13 +37,13 @@ public interface ReplicationQueues { * @param serverName The server name of the region server that owns the replication queues this * interface manages. 
*/ - void init(String serverName) throws KeeperException; + public void init(String serverName) throws KeeperException; /** * Remove a replication queue. * @param queueId a String that identifies the queue. */ - void removeQueue(String queueId); + public void removeQueue(String queueId); /** * Add a new HLog file to the given queue. If the queue does not exist it is created. @@ -51,14 +51,14 @@ public interface ReplicationQueues { * @param filename name of the HLog * @throws KeeperException */ - void addLog(String queueId, String filename) throws KeeperException; + public void addLog(String queueId, String filename) throws KeeperException; /** * Remove an HLog file from the given queue. * @param queueId a String that identifies the queue. * @param filename name of the HLog */ - void removeLog(String queueId, String filename); + public void removeLog(String queueId, String filename); /** * Set the current position for a specific HLog in a given queue. @@ -66,7 +66,7 @@ public interface ReplicationQueues { * @param filename name of the HLog * @param position the current position in the file */ - void setLogPosition(String queueId, String filename, long position); + public void setLogPosition(String queueId, String filename, long position); /** * Get the current position for a specific HLog in a given queue. @@ -74,25 +74,25 @@ public interface ReplicationQueues { * @param filename name of the HLog * @return the current position in the file */ - long getLogPosition(String queueId, String filename) throws KeeperException; + public long getLogPosition(String queueId, String filename) throws KeeperException; /** * Remove all replication queues for this region server. */ - void removeAllQueues(); + public void removeAllQueues(); /** * Get a list of all HLogs in the given queue. * @param queueId a String that identifies the queue * @return a list of HLogs, null if this region server is dead and has no outstanding queues */ - List getLogsInQueue(String queueId); + public List getLogsInQueue(String queueId); /** * Get a list of all queues for this region server. * @return a list of queueIds, null if this region server is dead and has no outstanding queues */ - List getAllQueues(); + public List getAllQueues(); /** * Take ownership for the set of queues belonging to a dead region server. @@ -100,12 +100,12 @@ public interface ReplicationQueues { * @return A SortedMap of the queues that have been claimed, including a SortedSet of HLogs in * each queue. Returns an empty map if no queues were failed-over. */ - SortedMap> claimQueues(String regionserver); + public SortedMap> claimQueues(String regionserver); /** * Get a list of all region servers that have outstanding replication queues. These servers could * be alive, dead or from a previous run of the cluster. * @return a list of server names */ - List getListOfReplicators(); -} + public List getListOfReplicators(); +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClient.java index f8edd2a4c6a..cd8d878b330 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClient.java @@ -31,7 +31,7 @@ public interface ReplicationQueuesClient { * be alive, dead or from a previous run of the cluster. 
* @return a list of server names */ - List getListOfReplicators(); + public List getListOfReplicators(); /** * Get a list of all HLogs in the given queue on the given region server. @@ -39,12 +39,12 @@ public interface ReplicationQueuesClient { * @param queueId a String that identifies the queue * @return a list of HLogs, null if this region server is dead and has no outstanding queues */ - List getLogsInQueue(String serverName, String queueId); + public List getLogsInQueue(String serverName, String queueId); /** * Get a list of all queues for the specified region server. * @param serverName the server name of the region server that owns the set of queues * @return a list of queueIds, null if this region server is not a replicator. */ - List getAllQueues(String serverName); -} + public List getAllQueues(String serverName); +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java index 41e32299682..9847b309801 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java @@ -216,17 +216,17 @@ public class PoolMap implements Map { } protected interface Pool { - R get(); + public R get(); - R put(R resource); + public R put(R resource); - boolean remove(R resource); + public boolean remove(R resource); - void clear(); + public void clear(); - Collection values(); + public Collection values(); - int size(); + public int size(); } public enum PoolType { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java index 32508887bc1..eb96bc0ad05 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java @@ -66,7 +66,7 @@ public class CompoundConfiguration extends Configuration { // Devs: these APIs are the same contract as their counterparts in // Configuration.java - private interface ImmutableConfigMap extends Iterable> { + private static interface ImmutableConfigMap extends Iterable> { String get(String key); String getRaw(String key); Class getClassByName(String name) throws ClassNotFoundException; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java index 2b918fd21ab..7eaf75ea814 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java @@ -2514,14 +2514,14 @@ public class KeyValue implements Cell, HeapSize, Cloneable { /** * Avoids redundant comparisons for better performance. */ - public interface SamePrefixComparator { + public static interface SamePrefixComparator { /** * Compare two keys assuming that the first n bytes are the same. * @param commonPrefix How many bytes are the same. 
*/ - int compareIgnoringPrefix( - int commonPrefix, T left, int loffset, int llength, T right, int roffset, int rlength - ); + public int compareIgnoringPrefix(int commonPrefix, + T left, int loffset, int llength, + T right, int roffset, int rlength); } /** diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java index 5a6b71ac760..a89cc2b020d 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java @@ -37,14 +37,14 @@ public interface Codec { * Call flush when done. Some encoders may not put anything on the stream until flush is called. * On flush, let go of any resources used by the encoder. */ - interface Encoder extends CellOutputStream {} + public interface Encoder extends CellOutputStream {} /** * Implementations should implicitly clean up any resources allocated when the * Decoder/CellScanner runs off the end of the cell block. Do this rather than require the user * call close explicitly. */ - interface Decoder extends CellScanner {}; + public interface Decoder extends CellScanner {}; Decoder getDecoder(InputStream is); Encoder getEncoder(OutputStream os); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/HeapSize.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/HeapSize.java index 4e7a1f617bd..23b172d4720 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/HeapSize.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/HeapSize.java @@ -45,5 +45,5 @@ public interface HeapSize { * @return Approximate 'exclusive deep size' of implementing object. Includes * count of payload and hosting object sizings. */ - long heapSize(); -} + public long heapSize(); +} \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java index 61496ec6d78..b2ce35ada60 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java @@ -57,9 +57,9 @@ public interface DataBlockEncoder { * @throws IOException * If there is an error writing to output stream. */ - void encodeKeyValues( - ByteBuffer in, boolean includesMemstoreTS, HFileBlockEncodingContext encodingContext - ) throws IOException; + public void encodeKeyValues( + ByteBuffer in, boolean includesMemstoreTS, + HFileBlockEncodingContext encodingContext) throws IOException; /** * Decode. @@ -69,9 +69,8 @@ public interface DataBlockEncoder { * @return Uncompressed block of KeyValues. * @throws IOException If there is an error in source. */ - ByteBuffer decodeKeyValues( - DataInputStream source, boolean includesMemstoreTS - ) throws IOException; + public ByteBuffer decodeKeyValues(DataInputStream source, + boolean includesMemstoreTS) throws IOException; /** * Uncompress. @@ -83,9 +82,8 @@ public interface DataBlockEncoder { * @return Uncompressed block of KeyValues. * @throws IOException If there is an error in source. 
*/ - ByteBuffer decodeKeyValues( - DataInputStream source, int allocateHeaderLength, int skipLastBytes, boolean includesMemstoreTS - ) + public ByteBuffer decodeKeyValues(DataInputStream source, + int allocateHeaderLength, int skipLastBytes, boolean includesMemstoreTS) throws IOException; /** @@ -96,7 +94,7 @@ public interface DataBlockEncoder { * @param block encoded block we want index, the position will not change * @return First key in block. */ - ByteBuffer getFirstKeyInBlock(ByteBuffer block); + public ByteBuffer getFirstKeyInBlock(ByteBuffer block); /** * Create a HFileBlock seeker which find KeyValues within a block. @@ -105,9 +103,8 @@ public interface DataBlockEncoder { * key-value pair * @return A newly created seeker. */ - EncodedSeeker createSeeker( - RawComparator comparator, boolean includesMemstoreTS - ); + public EncodedSeeker createSeeker(RawComparator comparator, + boolean includesMemstoreTS); /** * Creates a encoder specific encoding context @@ -122,9 +119,9 @@ public interface DataBlockEncoder { * is unknown * @return a newly created encoding context */ - HFileBlockEncodingContext newDataBlockEncodingContext( - Algorithm compressionAlgorithm, DataBlockEncoding encoding, byte[] headerBytes - ); + public HFileBlockEncodingContext newDataBlockEncodingContext( + Algorithm compressionAlgorithm, DataBlockEncoding encoding, + byte[] headerBytes); /** * Creates an encoder specific decoding context, which will prepare the data @@ -134,9 +131,8 @@ public interface DataBlockEncoder { * compression algorithm used if the data needs to be decompressed * @return a newly created decoding context */ - HFileBlockDecodingContext newDataBlockDecodingContext( - Algorithm compressionAlgorithm - ); + public HFileBlockDecodingContext newDataBlockDecodingContext( + Algorithm compressionAlgorithm); /** * An interface which enable to seek while underlying data is encoded. @@ -144,19 +140,19 @@ public interface DataBlockEncoder { * It works on one HFileBlock, but it is reusable. See * {@link #setCurrentBuffer(ByteBuffer)}. */ - interface EncodedSeeker { + public static interface EncodedSeeker { /** * Set on which buffer there will be done seeking. * @param buffer Used for seeking. */ - void setCurrentBuffer(ByteBuffer buffer); + public void setCurrentBuffer(ByteBuffer buffer); /** * Does a deep copy of the key at the current position. A deep copy is * necessary because buffers are reused in the decoder. * @return key at current position */ - ByteBuffer getKeyDeepCopy(); + public ByteBuffer getKeyDeepCopy(); /** * Does a shallow copy of the value at the current position. A shallow @@ -164,25 +160,25 @@ public interface DataBlockEncoder { * of the original encoded buffer. * @return value at current position */ - ByteBuffer getValueShallowCopy(); + public ByteBuffer getValueShallowCopy(); /** @return key value at current position with position set to limit */ - ByteBuffer getKeyValueBuffer(); + public ByteBuffer getKeyValueBuffer(); /** * @return the KeyValue object at the current position. Includes memstore * timestamp. */ - KeyValue getKeyValue(); + public KeyValue getKeyValue(); /** Set position to beginning of given block */ - void rewind(); + public void rewind(); /** * Move to next position * @return true on success, false if there is no more positions. */ - boolean next(); + public boolean next(); /** * Moves the seeker position within the current block to: @@ -201,8 +197,7 @@ public interface DataBlockEncoder { * of an exact match. Does not matter in case of an inexact match. 
* @return 0 on exact match, 1 on inexact match. */ - int seekToKeyInBlock( - byte[] key, int offset, int length, boolean seekBefore - ); + public int seekToKeyInBlock(byte[] key, int offset, int length, + boolean seekBefore); } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java index 4b9c9e47dbd..a86cc719dee 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java @@ -32,7 +32,7 @@ public interface HFileBlockDecodingContext { /** * @return the compression algorithm used by this decoding context */ - Compression.Algorithm getCompression(); + public Compression.Algorithm getCompression(); /** * Perform all actions that need to be done before the encoder's real decoding process. @@ -47,12 +47,7 @@ public interface HFileBlockDecodingContext { * @param offset data start offset in onDiskBlock * @throws IOException */ - void prepareDecoding( - int onDiskSizeWithoutHeader, - int uncompressedSizeWithoutHeader, - ByteBuffer blockBufferWithoutHeader, - byte[] onDiskBlock, - int offset - ) throws IOException; + public void prepareDecoding(int onDiskSizeWithoutHeader, int uncompressedSizeWithoutHeader, + ByteBuffer blockBufferWithoutHeader, byte[] onDiskBlock, int offset) throws IOException; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockEncodingContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockEncodingContext.java index b76d5ce8b66..78e2c740624 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockEncodingContext.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockEncodingContext.java @@ -34,39 +34,39 @@ public interface HFileBlockEncodingContext { /** * @return OutputStream to which encoded data is written */ - OutputStream getOutputStreamForEncoder(); + public OutputStream getOutputStreamForEncoder(); /** * @return encoded and compressed bytes with header which are ready to write * out to disk */ - byte[] getOnDiskBytesWithHeader(); + public byte[] getOnDiskBytesWithHeader(); /** * @return encoded but not heavily compressed bytes with header which can be * cached in block cache */ - byte[] getUncompressedBytesWithHeader(); + public byte[] getUncompressedBytesWithHeader(); /** * @return the block type after encoding */ - BlockType getBlockType(); + public BlockType getBlockType(); /** * @return the compression algorithm used by this encoding context */ - Compression.Algorithm getCompression(); + public Compression.Algorithm getCompression(); /** * sets the dummy header bytes */ - void setDummyHeader(byte[] headerBytes); + public void setDummyHeader(byte[] headerBytes); /** * @return the {@link DataBlockEncoding} encoding used */ - DataBlockEncoding getDataBlockEncoding(); + public DataBlockEncoding getDataBlockEncoding(); /** * Do any action that needs to be performed after the encoding. @@ -76,11 +76,11 @@ public interface HFileBlockEncodingContext { * @param blockType * @throws IOException */ - void postEncoding(BlockType blockType) throws IOException; + public void postEncoding(BlockType blockType) throws IOException; /** * Releases the resources used. 
*/ - void close(); + public void close(); } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java index 29210745fb8..8aefe6a25a5 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java @@ -1000,9 +1000,8 @@ public class Bytes { } interface Comparer { - int compareTo( - T buffer1, int offset1, int length1, T buffer2, int offset2, int length2 - ); + abstract public int compareTo(T buffer1, int offset1, int length1, + T buffer2, int offset2, int length2); } @VisibleForTesting diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java index f0b87e6c439..068dd1a36d4 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java @@ -50,16 +50,16 @@ public class ClassFinder { private ClassFilter classFilter; private FileFilter fileFilter; - public interface ResourcePathFilter { - boolean isCandidatePath(String resourcePath, boolean isJar); + public static interface ResourcePathFilter { + public boolean isCandidatePath(String resourcePath, boolean isJar); }; - public interface FileNameFilter { - boolean isCandidateFile(String fileName, String absFilePath); + public static interface FileNameFilter { + public boolean isCandidateFile(String fileName, String absFilePath); }; - public interface ClassFilter { - boolean isCandidateClass(Class c); + public static interface ClassFilter { + public boolean isCandidateClass(Class c); }; public ClassFinder() { diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/Waiter.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/Waiter.java index d9f9e7d3ba0..002838f235c 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/Waiter.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/Waiter.java @@ -86,14 +86,14 @@ public final class Waiter { * {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate) methods. */ @InterfaceAudience.Private - public interface Predicate { + public static interface Predicate { /** * Perform a predicate evaluation. * @return the boolean result of the evaluation. * @throws Exception thrown if the predicate evaluation could not evaluate. 
*/ - boolean evaluate() throws E; + public boolean evaluate() throws E; } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java index 05a7f44baa6..6771a3d399e 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java @@ -22,37 +22,37 @@ package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.hbase.metrics.BaseSource; public interface MetricsHBaseServerSource extends BaseSource { - String AUTHORIZATION_SUCCESSES_NAME = "authorizationSuccesses"; - String AUTHORIZATION_SUCCESSES_DESC = + public static final String AUTHORIZATION_SUCCESSES_NAME = "authorizationSuccesses"; + public static final String AUTHORIZATION_SUCCESSES_DESC = "Number of authorization successes."; - String AUTHORIZATION_FAILURES_NAME = "authorizationFailures"; - String AUTHORIZATION_FAILURES_DESC = + public static final String AUTHORIZATION_FAILURES_NAME = "authorizationFailures"; + public static final String AUTHORIZATION_FAILURES_DESC = "Number of authorization failures."; - String AUTHENTICATION_SUCCESSES_NAME = "authenticationSuccesses"; - String AUTHENTICATION_SUCCESSES_DESC = + public static final String AUTHENTICATION_SUCCESSES_NAME = "authenticationSuccesses"; + public static final String AUTHENTICATION_SUCCESSES_DESC = "Number of authentication successes."; - String AUTHENTICATION_FAILURES_NAME = "authenticationFailures"; - String AUTHENTICATION_FAILURES_DESC = + public static final String AUTHENTICATION_FAILURES_NAME = "authenticationFailures"; + public static final String AUTHENTICATION_FAILURES_DESC = "Number of authentication failures."; - String SENT_BYTES_NAME = "sentBytes"; - String SENT_BYTES_DESC = "Number of bytes sent."; - String RECEIVED_BYTES_NAME = "receivedBytes"; - String RECEIVED_BYTES_DESC = "Number of bytes received."; - String QUEUE_CALL_TIME_NAME = "queueCallTime"; - String QUEUE_CALL_TIME_DESC = "Queue Call Time."; - String PROCESS_CALL_TIME_NAME = "processCallTime"; - String PROCESS_CALL_TIME_DESC = "Processing call time."; - String QUEUE_SIZE_NAME = "queueSize"; - String QUEUE_SIZE_DESC = "Number of bytes in the call queues."; - String GENERAL_QUEUE_NAME = "numCallsInGeneralQueue"; - String GENERAL_QUEUE_DESC = "Number of calls in the general call queue."; - String PRIORITY_QUEUE_NAME = "numCallsInPriorityQueue"; - String REPLICATION_QUEUE_NAME = "numCallsInReplicationQueue"; - String REPLICATION_QUEUE_DESC = + public static final String SENT_BYTES_NAME = "sentBytes"; + public static final String SENT_BYTES_DESC = "Number of bytes sent."; + public static final String RECEIVED_BYTES_NAME = "receivedBytes"; + public static final String RECEIVED_BYTES_DESC = "Number of bytes received."; + public static final String QUEUE_CALL_TIME_NAME = "queueCallTime"; + public static final String QUEUE_CALL_TIME_DESC = "Queue Call Time."; + public static final String PROCESS_CALL_TIME_NAME = "processCallTime"; + public static final String PROCESS_CALL_TIME_DESC = "Processing call time."; + public static final String QUEUE_SIZE_NAME = "queueSize"; + public static final String QUEUE_SIZE_DESC = "Number of bytes in the call queues."; + public static final String GENERAL_QUEUE_NAME = "numCallsInGeneralQueue"; + public static final String GENERAL_QUEUE_DESC = "Number of calls in the general call queue."; + public static 
final String PRIORITY_QUEUE_NAME = "numCallsInPriorityQueue"; + public static final String REPLICATION_QUEUE_NAME = "numCallsInReplicationQueue"; + public static final String REPLICATION_QUEUE_DESC = "Number of calls in the replication call queue."; - String PRIORITY_QUEUE_DESC = "Number of calls in the priority call queue."; - String NUM_OPEN_CONNECTIONS_NAME = "numOpenConnections"; - String NUM_OPEN_CONNECTIONS_DESC = "Number of open connections."; + public static final String PRIORITY_QUEUE_DESC = "Number of calls in the priority call queue."; + public static final String NUM_OPEN_CONNECTIONS_NAME = "numOpenConnections"; + public static final String NUM_OPEN_CONNECTIONS_DESC = "Number of open connections."; void authorizationSuccess(); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java index 14a62f06f19..477ae425763 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java @@ -28,60 +28,60 @@ public interface MetricsMasterSource extends BaseSource { /** * The name of the metrics */ - String METRICS_NAME = "Server"; + static final String METRICS_NAME = "Server"; /** * The context metrics will be under. */ - String METRICS_CONTEXT = "master"; + static final String METRICS_CONTEXT = "master"; /** * The name of the metrics context that metrics will be under in jmx */ - String METRICS_JMX_CONTEXT = "Master,sub=" + METRICS_NAME; + static final String METRICS_JMX_CONTEXT = "Master,sub=" + METRICS_NAME; /** * Description */ - String METRICS_DESCRIPTION = "Metrics about HBase master server"; + static final String METRICS_DESCRIPTION = "Metrics about HBase master server"; // Strings used for exporting to metrics system. 
- String MASTER_ACTIVE_TIME_NAME = "masterActiveTime"; - String MASTER_START_TIME_NAME = "masterStartTime"; - String AVERAGE_LOAD_NAME = "averageLoad"; - String NUM_REGION_SERVERS_NAME = "numRegionServers"; - String NUM_DEAD_REGION_SERVERS_NAME = "numDeadRegionServers"; - String ZOOKEEPER_QUORUM_NAME = "zookeeperQuorum"; - String SERVER_NAME_NAME = "serverName"; - String CLUSTER_ID_NAME = "clusterId"; - String IS_ACTIVE_MASTER_NAME = "isActiveMaster"; - String SPLIT_TIME_NAME = "hlogSplitTime"; - String SPLIT_SIZE_NAME = "hlogSplitSize"; - String SNAPSHOT_TIME_NAME = "snapshotTime"; - String SNAPSHOT_RESTORE_TIME_NAME = "snapshotRestoreTime"; - String SNAPSHOT_CLONE_TIME_NAME = "snapshotCloneTime"; - String META_SPLIT_TIME_NAME = "metaHlogSplitTime"; - String META_SPLIT_SIZE_NAME = "metaHlogSplitSize"; - String CLUSTER_REQUESTS_NAME = "clusterRequests"; - String RIT_COUNT_NAME = "ritCount"; - String RIT_COUNT_OVER_THRESHOLD_NAME = "ritCountOverThreshold"; - String RIT_OLDEST_AGE_NAME = "ritOldestAge"; - String MASTER_ACTIVE_TIME_DESC = "Master Active Time"; - String MASTER_START_TIME_DESC = "Master Start Time"; - String AVERAGE_LOAD_DESC = "AverageLoad"; - String NUMBER_OF_REGION_SERVERS_DESC = "Number of RegionServers"; - String NUMBER_OF_DEAD_REGION_SERVERS_DESC = "Number of dead RegionServers"; - String ZOOKEEPER_QUORUM_DESC = "Zookeeper Quorum"; - String SERVER_NAME_DESC = "Server Name"; - String CLUSTER_ID_DESC = "Cluster Id"; - String IS_ACTIVE_MASTER_DESC = "Is Active Master"; - String SPLIT_TIME_DESC = "Time it takes to finish HLog.splitLog()"; - String SPLIT_SIZE_DESC = "Size of HLog files being split"; - String SNAPSHOT_TIME_DESC = "Time it takes to finish snapshot()"; - String SNAPSHOT_RESTORE_TIME_DESC = "Time it takes to finish restoreSnapshot()"; - String SNAPSHOT_CLONE_TIME_DESC = "Time it takes to finish cloneSnapshot()"; - String META_SPLIT_TIME_DESC = "Time it takes to finish splitMetaLog()"; - String META_SPLIT_SIZE_DESC = "Size of META HLog files being split"; + static final String MASTER_ACTIVE_TIME_NAME = "masterActiveTime"; + static final String MASTER_START_TIME_NAME = "masterStartTime"; + static final String AVERAGE_LOAD_NAME = "averageLoad"; + static final String NUM_REGION_SERVERS_NAME = "numRegionServers"; + static final String NUM_DEAD_REGION_SERVERS_NAME = "numDeadRegionServers"; + static final String ZOOKEEPER_QUORUM_NAME = "zookeeperQuorum"; + static final String SERVER_NAME_NAME = "serverName"; + static final String CLUSTER_ID_NAME = "clusterId"; + static final String IS_ACTIVE_MASTER_NAME = "isActiveMaster"; + static final String SPLIT_TIME_NAME = "hlogSplitTime"; + static final String SPLIT_SIZE_NAME = "hlogSplitSize"; + static final String SNAPSHOT_TIME_NAME = "snapshotTime"; + static final String SNAPSHOT_RESTORE_TIME_NAME = "snapshotRestoreTime"; + static final String SNAPSHOT_CLONE_TIME_NAME = "snapshotCloneTime"; + static final String META_SPLIT_TIME_NAME = "metaHlogSplitTime"; + static final String META_SPLIT_SIZE_NAME = "metaHlogSplitSize"; + static final String CLUSTER_REQUESTS_NAME = "clusterRequests"; + static final String RIT_COUNT_NAME = "ritCount"; + static final String RIT_COUNT_OVER_THRESHOLD_NAME = "ritCountOverThreshold"; + static final String RIT_OLDEST_AGE_NAME = "ritOldestAge"; + static final String MASTER_ACTIVE_TIME_DESC = "Master Active Time"; + static final String MASTER_START_TIME_DESC = "Master Start Time"; + static final String AVERAGE_LOAD_DESC = "AverageLoad"; + static final String NUMBER_OF_REGION_SERVERS_DESC = "Number of 
RegionServers"; + static final String NUMBER_OF_DEAD_REGION_SERVERS_DESC = "Number of dead RegionServers"; + static final String ZOOKEEPER_QUORUM_DESC = "Zookeeper Quorum"; + static final String SERVER_NAME_DESC = "Server Name"; + static final String CLUSTER_ID_DESC = "Cluster Id"; + static final String IS_ACTIVE_MASTER_DESC = "Is Active Master"; + static final String SPLIT_TIME_DESC = "Time it takes to finish HLog.splitLog()"; + static final String SPLIT_SIZE_DESC = "Size of HLog files being split"; + static final String SNAPSHOT_TIME_DESC = "Time it takes to finish snapshot()"; + static final String SNAPSHOT_RESTORE_TIME_DESC = "Time it takes to finish restoreSnapshot()"; + static final String SNAPSHOT_CLONE_TIME_DESC = "Time it takes to finish cloneSnapshot()"; + static final String META_SPLIT_TIME_DESC = "Time it takes to finish splitMetaLog()"; + static final String META_SPLIT_SIZE_DESC = "Size of META HLog files being split"; /** * Increment the number of requests the cluster has seen. diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java index 4c143358ee6..d11dd768061 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java @@ -24,7 +24,7 @@ package org.apache.hadoop.hbase.metrics; */ public interface BaseSource { - String HBASE_METRICS_SYSTEM_NAME = "HBase"; + public static final String HBASE_METRICS_SYSTEM_NAME = "HBase"; /** * Clear out the metrics and re-prepare the source. diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java index 0ef74a92787..5e6e27323f9 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java @@ -29,22 +29,22 @@ public interface MetricsRegionAggregateSource extends BaseSource { /** * The name of the metrics */ - String METRICS_NAME = "Regions"; + static final String METRICS_NAME = "Regions"; /** * The name of the metrics context that metrics will be under. */ - String METRICS_CONTEXT = "regionserver"; + static final String METRICS_CONTEXT = "regionserver"; /** * Description */ - String METRICS_DESCRIPTION = "Metrics about HBase RegionServer regions and tables"; + static final String METRICS_DESCRIPTION = "Metrics about HBase RegionServer regions and tables"; /** * The name of the metrics context that metrics will be under in jmx */ - String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; + static final String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; /** * Register a MetricsRegionSource as being open. 
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java index 48adeb28f7a..609f9dbfaab 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java @@ -28,22 +28,22 @@ public interface MetricsRegionServerSource extends BaseSource { /** * The name of the metrics */ - String METRICS_NAME = "Server"; + static final String METRICS_NAME = "Server"; /** * The name of the metrics context that metrics will be under. */ - String METRICS_CONTEXT = "regionserver"; + static final String METRICS_CONTEXT = "regionserver"; /** * Description */ - String METRICS_DESCRIPTION = "Metrics about HBase RegionServer"; + static final String METRICS_DESCRIPTION = "Metrics about HBase RegionServer"; /** * The name of the metrics context that metrics will be under in jmx */ - String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; + static final String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; /** * Update the Put time histogram @@ -113,103 +113,103 @@ public interface MetricsRegionServerSource extends BaseSource { void incrSlowAppend(); // Strings used for exporting to metrics system. - String REGION_COUNT = "regionCount"; - String REGION_COUNT_DESC = "Number of regions"; - String STORE_COUNT = "storeCount"; - String STORE_COUNT_DESC = "Number of Stores"; - String STOREFILE_COUNT = "storeFileCount"; - String STOREFILE_COUNT_DESC = "Number of Store Files"; - String MEMSTORE_SIZE = "memStoreSize"; - String MEMSTORE_SIZE_DESC = "Size of the memstore"; - String STOREFILE_SIZE = "storeFileSize"; - String STOREFILE_SIZE_DESC = "Size of storefiles being served."; - String TOTAL_REQUEST_COUNT = "totalRequestCount"; - String TOTAL_REQUEST_COUNT_DESC = + static final String REGION_COUNT = "regionCount"; + static final String REGION_COUNT_DESC = "Number of regions"; + static final String STORE_COUNT = "storeCount"; + static final String STORE_COUNT_DESC = "Number of Stores"; + static final String STOREFILE_COUNT = "storeFileCount"; + static final String STOREFILE_COUNT_DESC = "Number of Store Files"; + static final String MEMSTORE_SIZE = "memStoreSize"; + static final String MEMSTORE_SIZE_DESC = "Size of the memstore"; + static final String STOREFILE_SIZE = "storeFileSize"; + static final String STOREFILE_SIZE_DESC = "Size of storefiles being served."; + static final String TOTAL_REQUEST_COUNT = "totalRequestCount"; + static final String TOTAL_REQUEST_COUNT_DESC = "Total number of requests this RegionServer has answered."; - String READ_REQUEST_COUNT = "readRequestCount"; - String READ_REQUEST_COUNT_DESC = + static final String READ_REQUEST_COUNT = "readRequestCount"; + static final String READ_REQUEST_COUNT_DESC = "Number of read requests this region server has answered."; - String WRITE_REQUEST_COUNT = "writeRequestCount"; - String WRITE_REQUEST_COUNT_DESC = + static final String WRITE_REQUEST_COUNT = "writeRequestCount"; + static final String WRITE_REQUEST_COUNT_DESC = "Number of mutation requests this region server has answered."; - String CHECK_MUTATE_FAILED_COUNT = "checkMutateFailedCount"; - String CHECK_MUTATE_FAILED_COUNT_DESC = + static final String CHECK_MUTATE_FAILED_COUNT = "checkMutateFailedCount"; + static final String CHECK_MUTATE_FAILED_COUNT_DESC = "Number of Check 
and Mutate calls that failed the checks."; - String CHECK_MUTATE_PASSED_COUNT = "checkMutatePassedCount"; - String CHECK_MUTATE_PASSED_COUNT_DESC = + static final String CHECK_MUTATE_PASSED_COUNT = "checkMutatePassedCount"; + static final String CHECK_MUTATE_PASSED_COUNT_DESC = "Number of Check and Mutate calls that passed the checks."; - String STOREFILE_INDEX_SIZE = "storeFileIndexSize"; - String STOREFILE_INDEX_SIZE_DESC = "Size of indexes in storefiles on disk."; - String STATIC_INDEX_SIZE = "staticIndexSize"; - String STATIC_INDEX_SIZE_DESC = "Uncompressed size of the static indexes."; - String STATIC_BLOOM_SIZE = "staticBloomSize"; - String STATIC_BLOOM_SIZE_DESC = + static final String STOREFILE_INDEX_SIZE = "storeFileIndexSize"; + static final String STOREFILE_INDEX_SIZE_DESC = "Size of indexes in storefiles on disk."; + static final String STATIC_INDEX_SIZE = "staticIndexSize"; + static final String STATIC_INDEX_SIZE_DESC = "Uncompressed size of the static indexes."; + static final String STATIC_BLOOM_SIZE = "staticBloomSize"; + static final String STATIC_BLOOM_SIZE_DESC = "Uncompressed size of the static bloom filters."; - String NUMBER_OF_MUTATIONS_WITHOUT_WAL = "mutationsWithoutWALCount"; - String NUMBER_OF_MUTATIONS_WITHOUT_WAL_DESC = + static final String NUMBER_OF_MUTATIONS_WITHOUT_WAL = "mutationsWithoutWALCount"; + static final String NUMBER_OF_MUTATIONS_WITHOUT_WAL_DESC = "Number of mutations that have been sent by clients with the write ahead logging turned off."; - String DATA_SIZE_WITHOUT_WAL = "mutationsWithoutWALSize"; - String DATA_SIZE_WITHOUT_WAL_DESC = + static final String DATA_SIZE_WITHOUT_WAL = "mutationsWithoutWALSize"; + static final String DATA_SIZE_WITHOUT_WAL_DESC = "Size of data that has been sent by clients with the write ahead logging turned off."; - String PERCENT_FILES_LOCAL = "percentFilesLocal"; - String PERCENT_FILES_LOCAL_DESC = + static final String PERCENT_FILES_LOCAL = "percentFilesLocal"; + static final String PERCENT_FILES_LOCAL_DESC = "The percent of HFiles that are stored on the local hdfs data node."; - String COMPACTION_QUEUE_LENGTH = "compactionQueueLength"; - String COMPACTION_QUEUE_LENGTH_DESC = "Length of the queue for compactions."; - String FLUSH_QUEUE_LENGTH = "flushQueueLength"; - String FLUSH_QUEUE_LENGTH_DESC = "Length of the queue for region flushes"; - String BLOCK_CACHE_FREE_SIZE = "blockCacheFreeSize"; - String BLOCK_CACHE_FREE_DESC = + static final String COMPACTION_QUEUE_LENGTH = "compactionQueueLength"; + static final String COMPACTION_QUEUE_LENGTH_DESC = "Length of the queue for compactions."; + static final String FLUSH_QUEUE_LENGTH = "flushQueueLength"; + static final String FLUSH_QUEUE_LENGTH_DESC = "Length of the queue for region flushes"; + static final String BLOCK_CACHE_FREE_SIZE = "blockCacheFreeSize"; + static final String BLOCK_CACHE_FREE_DESC = "Size of the block cache that is not occupied."; - String BLOCK_CACHE_COUNT = "blockCacheCount"; - String BLOCK_CACHE_COUNT_DESC = "Number of block in the block cache."; - String BLOCK_CACHE_SIZE = "blockCacheSize"; - String BLOCK_CACHE_SIZE_DESC = "Size of the block cache."; - String BLOCK_CACHE_HIT_COUNT = "blockCacheHitCount"; - String BLOCK_CACHE_HIT_COUNT_DESC = "Count of the hit on the block cache."; - String BLOCK_CACHE_MISS_COUNT = "blockCacheMissCount"; - String BLOCK_COUNT_MISS_COUNT_DESC = + static final String BLOCK_CACHE_COUNT = "blockCacheCount"; + static final String BLOCK_CACHE_COUNT_DESC = "Number of block in the block cache."; + static final String 
BLOCK_CACHE_SIZE = "blockCacheSize"; + static final String BLOCK_CACHE_SIZE_DESC = "Size of the block cache."; + static final String BLOCK_CACHE_HIT_COUNT = "blockCacheHitCount"; + static final String BLOCK_CACHE_HIT_COUNT_DESC = "Count of the hit on the block cache."; + static final String BLOCK_CACHE_MISS_COUNT = "blockCacheMissCount"; + static final String BLOCK_COUNT_MISS_COUNT_DESC = "Number of requests for a block that missed the block cache."; - String BLOCK_CACHE_EVICTION_COUNT = "blockCacheEvictionCount"; - String BLOCK_CACHE_EVICTION_COUNT_DESC = + static final String BLOCK_CACHE_EVICTION_COUNT = "blockCacheEvictionCount"; + static final String BLOCK_CACHE_EVICTION_COUNT_DESC = "Count of the number of blocks evicted from the block cache."; - String BLOCK_CACHE_HIT_PERCENT = "blockCountHitPercent"; - String BLOCK_CACHE_HIT_PERCENT_DESC = + static final String BLOCK_CACHE_HIT_PERCENT = "blockCountHitPercent"; + static final String BLOCK_CACHE_HIT_PERCENT_DESC = "Percent of block cache requests that are hits"; - String BLOCK_CACHE_EXPRESS_HIT_PERCENT = "blockCacheExpressHitPercent"; - String BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC = + static final String BLOCK_CACHE_EXPRESS_HIT_PERCENT = "blockCacheExpressHitPercent"; + static final String BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC = "The percent of the time that requests with the cache turned on hit the cache."; - String RS_START_TIME_NAME = "regionServerStartTime"; - String ZOOKEEPER_QUORUM_NAME = "zookeeperQuorum"; - String SERVER_NAME_NAME = "serverName"; - String CLUSTER_ID_NAME = "clusterId"; - String RS_START_TIME_DESC = "RegionServer Start Time"; - String ZOOKEEPER_QUORUM_DESC = "Zookeeper Quorum"; - String SERVER_NAME_DESC = "Server Name"; - String CLUSTER_ID_DESC = "Cluster Id"; - String UPDATES_BLOCKED_TIME = "updatesBlockedTime"; - String UPDATES_BLOCKED_DESC = + static final String RS_START_TIME_NAME = "regionServerStartTime"; + static final String ZOOKEEPER_QUORUM_NAME = "zookeeperQuorum"; + static final String SERVER_NAME_NAME = "serverName"; + static final String CLUSTER_ID_NAME = "clusterId"; + static final String RS_START_TIME_DESC = "RegionServer Start Time"; + static final String ZOOKEEPER_QUORUM_DESC = "Zookeeper Quorum"; + static final String SERVER_NAME_DESC = "Server Name"; + static final String CLUSTER_ID_DESC = "Cluster Id"; + static final String UPDATES_BLOCKED_TIME = "updatesBlockedTime"; + static final String UPDATES_BLOCKED_DESC = "Number of MS updates have been blocked so that the memstore can be flushed."; - String DELETE_KEY = "delete"; - String GET_KEY = "get"; - String INCREMENT_KEY = "increment"; - String MUTATE_KEY = "mutate"; - String APPEND_KEY = "append"; - String REPLAY_KEY = "replay"; - String SCAN_NEXT_KEY = "scanNext"; - String SLOW_MUTATE_KEY = "slowPutCount"; - String SLOW_GET_KEY = "slowGetCount"; - String SLOW_DELETE_KEY = "slowDeleteCount"; - String SLOW_INCREMENT_KEY = "slowIncrementCount"; - String SLOW_APPEND_KEY = "slowAppendCount"; - String SLOW_MUTATE_DESC = + static final String DELETE_KEY = "delete"; + static final String GET_KEY = "get"; + static final String INCREMENT_KEY = "increment"; + static final String MUTATE_KEY = "mutate"; + static final String APPEND_KEY = "append"; + static final String REPLAY_KEY = "replay"; + static final String SCAN_NEXT_KEY = "scanNext"; + static final String SLOW_MUTATE_KEY = "slowPutCount"; + static final String SLOW_GET_KEY = "slowGetCount"; + static final String SLOW_DELETE_KEY = "slowDeleteCount"; + static final String SLOW_INCREMENT_KEY = 
"slowIncrementCount"; + static final String SLOW_APPEND_KEY = "slowAppendCount"; + static final String SLOW_MUTATE_DESC = "The number of Multis that took over 1000ms to complete"; - String SLOW_DELETE_DESC = + static final String SLOW_DELETE_DESC = "The number of Deletes that took over 1000ms to complete"; - String SLOW_GET_DESC = "The number of Gets that took over 1000ms to complete"; - String SLOW_INCREMENT_DESC = + static final String SLOW_GET_DESC = "The number of Gets that took over 1000ms to complete"; + static final String SLOW_INCREMENT_DESC = "The number of Increments that took over 1000ms to complete"; - String SLOW_APPEND_DESC = + static final String SLOW_APPEND_DESC = "The number of Appends that took over 1000ms to complete"; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java index 0abff2545b3..de15d39824d 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java @@ -27,35 +27,35 @@ public interface MetricsRegionServerWrapper { /** * Get ServerName */ - String getServerName(); + public String getServerName(); /** * Get the Cluster ID * * @return Cluster ID */ - String getClusterId(); + public String getClusterId(); /** * Get the Zookeeper Quorum Info * * @return Zookeeper Quorum Info */ - String getZookeeperQuorum(); + public String getZookeeperQuorum(); /** * Get the co-processors * * @return Co-processors */ - String getCoprocessors(); + public String getCoprocessors(); /** * Get HRegionServer start time * * @return Start time of RegionServer in milliseconds */ - long getStartCode(); + public long getStartCode(); /** * The number of online regions diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java index afff80ef56b..901473dfe3a 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java @@ -25,8 +25,8 @@ package org.apache.hadoop.hbase.regionserver; */ public interface MetricsRegionSource extends Comparable { - String OPS_SAMPLE_NAME = "ops"; - String SIZE_VALUE_NAME = "size"; + public static final String OPS_SAMPLE_NAME = "ops"; + public static final String SIZE_VALUE_NAME = "size"; /** * Close the region's metrics as this region is closing. diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySource.java index 793429d7a42..e4236e07018 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySource.java @@ -29,30 +29,30 @@ public interface MetricsEditsReplaySource extends BaseSource { /** * The name of the metrics */ - String METRICS_NAME = "replay"; + static final String METRICS_NAME = "replay"; /** * The name of the metrics context that metrics will be under. 
*/ - String METRICS_CONTEXT = "regionserver"; + static final String METRICS_CONTEXT = "regionserver"; /** * Description */ - String METRICS_DESCRIPTION = "Metrics about HBase RegionServer HLog Edits Replay"; + static final String METRICS_DESCRIPTION = "Metrics about HBase RegionServer HLog Edits Replay"; /** * The name of the metrics context that metrics will be under in jmx */ - String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; + static final String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; - String REPLAY_TIME_NAME = "replayTime"; - String REPLAY_TIME_DESC = "Time a replay operation took."; - String REPLAY_BATCH_SIZE_NAME = "replayBatchSize"; - String REPLAY_BATCH_SIZE_DESC = "Number of changes in each replay batch."; - String REPLAY_DATA_SIZE_NAME = "replayDataSize"; - String REPLAY_DATA_SIZE_DESC = "Size (in bytes) of the data of each replay."; + static final String REPLAY_TIME_NAME = "replayTime"; + static final String REPLAY_TIME_DESC = "Time a replay operation took."; + static final String REPLAY_BATCH_SIZE_NAME = "replayBatchSize"; + static final String REPLAY_BATCH_SIZE_DESC = "Number of changes in each replay batch."; + static final String REPLAY_DATA_SIZE_NAME = "replayDataSize"; + static final String REPLAY_DATA_SIZE_DESC = "Size (in bytes) of the data of each replay."; /** * Add the time a replay command took diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java index 1c59f657139..ccbc166939d 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java @@ -29,34 +29,34 @@ public interface MetricsWALSource extends BaseSource { /** * The name of the metrics */ - String METRICS_NAME = "WAL"; + static final String METRICS_NAME = "WAL"; /** * The name of the metrics context that metrics will be under.
*/ - String METRICS_CONTEXT = "regionserver"; + static final String METRICS_CONTEXT = "regionserver"; /** * Description */ - String METRICS_DESCRIPTION = "Metrics about HBase RegionServer HLog"; + static final String METRICS_DESCRIPTION = "Metrics about HBase RegionServer HLog"; /** * The name of the metrics context that metrics will be under in jmx */ - String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; + static final String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; - String APPEND_TIME = "appendTime"; - String APPEND_TIME_DESC = "Time an append to the log took."; - String APPEND_COUNT = "appendCount"; - String APPEND_COUNT_DESC = "Number of appends to the write ahead log."; - String APPEND_SIZE = "appendSize"; - String APPEND_SIZE_DESC = "Size (in bytes) of the data appended to the HLog."; - String SLOW_APPEND_COUNT = "slowAppendCount"; - String SLOW_APPEND_COUNT_DESC = "Number of appends that were slow."; - String SYNC_TIME = "syncTime"; - String SYNC_TIME_DESC = "The time it took to sync the HLog to HDFS."; + static final String APPEND_TIME = "appendTime"; + static final String APPEND_TIME_DESC = "Time an append to the log took."; + static final String APPEND_COUNT = "appendCount"; + static final String APPEND_COUNT_DESC = "Number of appends to the write ahead log."; + static final String APPEND_SIZE = "appendSize"; + static final String APPEND_SIZE_DESC = "Size (in bytes) of the data appended to the HLog."; + static final String SLOW_APPEND_COUNT = "slowAppendCount"; + static final String SLOW_APPEND_COUNT_DESC = "Number of appends that were slow."; + static final String SYNC_TIME = "syncTime"; + static final String SYNC_TIME_DESC = "The time it took to sync the HLog to HDFS."; /** * Add the append size. diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java index 6a917015d22..5b79a3977a4 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java @@ -28,21 +28,21 @@ public interface MetricsReplicationSource extends BaseSource { /** * The name of the metrics */ - String METRICS_NAME = "Replication"; + static final String METRICS_NAME = "Replication"; /** * The name of the metrics context that metrics will be under. */ - String METRICS_CONTEXT = "regionserver"; + static final String METRICS_CONTEXT = "regionserver"; /** * The name of the metrics context that metrics will be under. */ - String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; + static final String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; /** * A description. 
*/ - String METRICS_DESCRIPTION = "Metrics about HBase replication"; + static final String METRICS_DESCRIPTION = "Metrics about HBase replication"; } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java index c1629f788b3..aa43f35fedb 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java @@ -25,27 +25,27 @@ import org.apache.hadoop.hbase.metrics.BaseSource; */ public interface MetricsRESTSource extends BaseSource { - String METRICS_NAME = "REST"; + public static String METRICS_NAME = "REST"; - String CONTEXT = "rest"; + public static String CONTEXT = "rest"; - String JMX_CONTEXT = "REST"; + public static String JMX_CONTEXT = "REST"; - String METRICS_DESCRIPTION = "Metrics about the HBase REST server"; + public static String METRICS_DESCRIPTION = "Metrics about the HBase REST server"; - String REQUEST_KEY = "requests"; + static String REQUEST_KEY = "requests"; - String SUCCESSFUL_GET_KEY = "successfulGet"; + static String SUCCESSFUL_GET_KEY = "successfulGet"; - String SUCCESSFUL_PUT_KEY = "successfulPut"; + static String SUCCESSFUL_PUT_KEY = "successfulPut"; - String SUCCESSFUL_DELETE_KEY = "successfulDelete"; + static String SUCCESSFUL_DELETE_KEY = "successfulDelete"; - String FAILED_GET_KEY = "failedGet"; + static String FAILED_GET_KEY = "failedGet"; - String FAILED_PUT_KEY = "failedPut"; + static String FAILED_PUT_KEY = "failedPut"; - String FAILED_DELETE_KEY = "failedDelete"; + static String FAILED_DELETE_KEY = "failedDelete"; /** * Increment the number of requests diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java index 4520f8cc1f1..206154fdb46 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java @@ -25,12 +25,12 @@ import org.apache.hadoop.hbase.metrics.BaseSource; */ public interface MetricsThriftServerSource extends BaseSource { - String BATCH_GET_KEY = "batchGet"; - String BATCH_MUTATE_KEY = "batchMutate"; - String TIME_IN_QUEUE_KEY = "timeInQueue"; - String THRIFT_CALL_KEY = "thriftCall"; - String SLOW_THRIFT_CALL_KEY = "slowThriftCall"; - String CALL_QUEUE_LEN_KEY = "callQueueLen"; + static final String BATCH_GET_KEY = "batchGet"; + static final String BATCH_MUTATE_KEY = "batchMutate"; + static final String TIME_IN_QUEUE_KEY = "timeInQueue"; + static final String THRIFT_CALL_KEY = "thriftCall"; + static final String SLOW_THRIFT_CALL_KEY = "slowThriftCall"; + static final String CALL_QUEUE_LEN_KEY = "callQueueLen"; /** * Add how long an operation was in the queue. 
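One wrinkle in the MetricsRESTSource hunk above: the restored declarations say "public static String" (and plain "static String") without final, yet the fields are still constants, because the implicit final of interface fields applies whether or not the keyword is written (JLS 9.3). A small sketch under that assumption, with a hypothetical stand-in interface:

    public class FinalByDefault {
      // Hypothetical stand-in for MetricsRESTSource; 'final' is deliberately omitted.
      interface RestNames { public static String METRICS_NAME = "REST"; }

      public static void main(String[] args) {
        System.out.println(RestNames.METRICS_NAME); // reading works as usual
        // The next line would be rejected at compile time even without 'final':
        // RestNames.METRICS_NAME = "rest"; // error: cannot assign a value to final variable
      }
    }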
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java index a4608b5603a..8fca2cf3ce8 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java @@ -21,12 +21,12 @@ package org.apache.hadoop.hbase.thrift; /** Factory that will be used to create metrics sources for the two different types of thrift servers. */ public interface MetricsThriftServerSourceFactory { - String METRICS_NAME = "Thrift"; - String METRICS_DESCRIPTION = "Thrift Server Metrics"; - String THRIFT_ONE_METRICS_CONTEXT = "thrift-one"; - String THRIFT_ONE_JMX_CONTEXT = "Thrift,sub=ThriftOne"; - String THRIFT_TWO_METRICS_CONTEXT = "thrift-two"; - String THRIFT_TWO_JMX_CONTEXT = "Thrift,sub=ThriftTwo"; + static final String METRICS_NAME = "Thrift"; + static final String METRICS_DESCRIPTION = "Thrift Server Metrics"; + static final String THRIFT_ONE_METRICS_CONTEXT = "thrift-one"; + static final String THRIFT_ONE_JMX_CONTEXT = "Thrift,sub=ThriftOne"; + static final String THRIFT_TWO_METRICS_CONTEXT = "thrift-two"; + static final String THRIFT_TWO_JMX_CONTEXT = "Thrift,sub=ThriftTwo"; /** Create a Source for a thrift one server */ MetricsThriftServerSource createThriftOneSource(); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java index 93ff5ba5af7..f431632a170 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java @@ -25,14 +25,14 @@ package org.apache.hadoop.metrics2; public interface MetricHistogram { //Strings used to create metrics names. - String NUM_OPS_METRIC_NAME = "_num_ops"; - String MIN_METRIC_NAME = "_min"; - String MAX_METRIC_NAME = "_max"; - String MEAN_METRIC_NAME = "_mean"; - String MEDIAN_METRIC_NAME = "_median"; - String SEVENTY_FIFTH_PERCENTILE_METRIC_NAME = "_75th_percentile"; - String NINETY_FIFTH_PERCENTILE_METRIC_NAME = "_95th_percentile"; - String NINETY_NINETH_PERCENTILE_METRIC_NAME = "_99th_percentile"; + static final String NUM_OPS_METRIC_NAME = "_num_ops"; + static final String MIN_METRIC_NAME = "_min"; + static final String MAX_METRIC_NAME = "_max"; + static final String MEAN_METRIC_NAME = "_mean"; + static final String MEDIAN_METRIC_NAME = "_median"; + static final String SEVENTY_FIFTH_PERCENTILE_METRIC_NAME = "_75th_percentile"; + static final String NINETY_FIFTH_PERCENTILE_METRIC_NAME = "_95th_percentile"; + static final String NINETY_NINETH_PERCENTILE_METRIC_NAME = "_99th_percentile"; /** * Add a single value to a histogram's stream of values.
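The same redundancy holds for the method declarations throughout these hunks: an interface method with no body is implicitly public and abstract (JLS 9.4), which is why MetricsThriftServerSourceFactory above can leave createThriftOneSource() unqualified even while other hunks restore "public" on comparable methods. A sketch with hypothetical interfaces confirming that the two spellings declare the same member:

    import java.lang.reflect.Modifier;

    public class ImplicitMethodModifiers {
      interface BareSource { void incrementRequests(int n); }
      interface QualifiedSource { public abstract void incrementRequests(int n); }

      public static void main(String[] args) throws Exception {
        int bare = BareSource.class
            .getMethod("incrementRequests", int.class).getModifiers();
        int qualified = QualifiedSource.class
            .getMethod("incrementRequests", int.class).getModifiers();
        System.out.println(Modifier.toString(bare));      // public abstract
        System.out.println(Modifier.toString(qualified)); // public abstract
      }
    }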
diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java index 157327babb2..88e4af9558d 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java @@ -32,6 +32,6 @@ public interface HadoopShims { * TaskAttemptId.forName() * @return a concrete TaskAttemptContext instance of o.a.h.mapreduce.TaskAttemptContext */ - T createTestTaskAttemptContext(final J job, final String taskId); + public T createTestTaskAttemptContext(final J job, final String taskId); } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java index 2eefcd2d694..1d4d2f62ecc 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java @@ -27,7 +27,7 @@ public interface MetricsAssertHelper { * Init helper. This method will make sure that the metrics system is set * up for tests. */ - void init(); + public void init(); /** * Assert that a tag exists and has a given value. @@ -37,7 +37,7 @@ public interface MetricsAssertHelper { * @param source The BaseSource{@link BaseSource} that will provide the tags, * gauges, and counters. */ - void assertTag(String name, String expected, BaseSource source); + public void assertTag(String name, String expected, BaseSource source); /** * Assert that a gauge exists and that it's value is equal to the expected value. @@ -47,7 +47,7 @@ public interface MetricsAssertHelper { * @param source The BaseSource{@link BaseSource} that will provide the tags, * gauges, and counters. */ - void assertGauge(String name, long expected, BaseSource source); + public void assertGauge(String name, long expected, BaseSource source); /** * Assert that a gauge exists and it's value is greater than a given value @@ -57,7 +57,7 @@ public interface MetricsAssertHelper { * @param source The BaseSource{@link BaseSource} that will provide the tags, * gauges, and counters. */ - void assertGaugeGt(String name, long expected, BaseSource source); + public void assertGaugeGt(String name, long expected, BaseSource source); /** * Assert that a gauge exists and it's value is less than a given value @@ -67,7 +67,7 @@ public interface MetricsAssertHelper { * @param source The BaseSource{@link BaseSource} that will provide the tags, * gauges, and counters. */ - void assertGaugeLt(String name, long expected, BaseSource source); + public void assertGaugeLt(String name, long expected, BaseSource source); /** * Assert that a gauge exists and that it's value is equal to the expected value. @@ -77,7 +77,7 @@ public interface MetricsAssertHelper { * @param source The BaseSource{@link BaseSource} that will provide the tags, * gauges, and counters. */ - void assertGauge(String name, double expected, BaseSource source); + public void assertGauge(String name, double expected, BaseSource source); /** * Assert that a gauge exists and it's value is greater than a given value @@ -87,7 +87,7 @@ public interface MetricsAssertHelper { * @param source The BaseSource{@link BaseSource} that will provide the tags, * gauges, and counters. 
*/ - void assertGaugeGt(String name, double expected, BaseSource source); + public void assertGaugeGt(String name, double expected, BaseSource source); /** * Assert that a gauge exists and it's value is less than a given value @@ -97,7 +97,7 @@ public interface MetricsAssertHelper { * @param source The BaseSource{@link BaseSource} that will provide the tags, * gauges, and counters. */ - void assertGaugeLt(String name, double expected, BaseSource source); + public void assertGaugeLt(String name, double expected, BaseSource source); /** * Assert that a counter exists and that it's value is equal to the expected value. @@ -107,7 +107,7 @@ public interface MetricsAssertHelper { * @param source The BaseSource{@link BaseSource} that will provide the tags, * gauges, and counters. */ - void assertCounter(String name, long expected, BaseSource source); + public void assertCounter(String name, long expected, BaseSource source); /** * Assert that a counter exists and that it's value is greater than the given value. @@ -117,7 +117,7 @@ public interface MetricsAssertHelper { * @param source The BaseSource{@link BaseSource} that will provide the tags, * gauges, and counters. */ - void assertCounterGt(String name, long expected, BaseSource source); + public void assertCounterGt(String name, long expected, BaseSource source); /** * Assert that a counter exists and that it's value is less than the given value. @@ -127,7 +127,7 @@ public interface MetricsAssertHelper { * @param source The BaseSource{@link BaseSource} that will provide the tags, * gauges, and counters. */ - void assertCounterLt(String name, long expected, BaseSource source); + public void assertCounterLt(String name, long expected, BaseSource source); /** * Get the value of a counter. @@ -137,7 +137,7 @@ public interface MetricsAssertHelper { * gauges, and counters. * @return long value of the counter. */ - long getCounter(String name, BaseSource source); + public long getCounter(String name, BaseSource source); /** * Get the value of a gauge as a double. @@ -147,7 +147,7 @@ public interface MetricsAssertHelper { * gauges, and counters. * @return double value of the gauge. */ - double getGaugeDouble(String name, BaseSource source); + public double getGaugeDouble(String name, BaseSource source); /** * Get the value of a gauge as a long. @@ -157,5 +157,5 @@ public interface MetricsAssertHelper { * gauges, and counters. * @return long value of the gauge. 
*/ - long getGaugeLong(String name, BaseSource source); + public long getGaugeLong(String name, BaseSource source); } diff --git a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/builder/TestTokenizerData.java b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/builder/TestTokenizerData.java index 2ec51efd806..9576bb534c2 100644 --- a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/builder/TestTokenizerData.java +++ b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/builder/TestTokenizerData.java @@ -31,7 +31,7 @@ public interface TestTokenizerData { List getInputs(); List getOutputs(); - class InMemory { + public static class InMemory { public Collection getAllAsObjectArray() { List all = Lists.newArrayList(); all.add(new Object[] { new TestTokenizerDataBasic() }); diff --git a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/column/TestColumnData.java b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/column/TestColumnData.java index 3b206f285d7..47773cb25f3 100644 --- a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/column/TestColumnData.java +++ b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/column/TestColumnData.java @@ -32,7 +32,7 @@ public interface TestColumnData { List getInputs(); List getOutputs(); - class InMemory { + public static class InMemory { public Collection getAllAsObjectArray() { List all = Lists.newArrayList(); all.add(new Object[] { new TestColumnDataSimple() }); diff --git a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/TestRowData.java b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/TestRowData.java index c5e3a0a2202..b0cb43f4342 100644 --- a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/TestRowData.java +++ b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/TestRowData.java @@ -54,7 +54,7 @@ public interface TestRowData { void individualSearcherAssertions(CellSearcher searcher); - class InMemory { + public static class InMemory { /* * The following are different styles of data that the codec may encounter. 
Having these small diff --git a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/TestTimestampData.java b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/TestTimestampData.java index c3618ffbb3d..f26c5b8c40e 100644 --- a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/TestTimestampData.java +++ b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/TestTimestampData.java @@ -33,7 +33,7 @@ public interface TestTimestampData { long getMinimum(); List getOutputs(); - class InMemory { + public static class InMemory { public Collection getAllAsObjectArray() { List all = Lists.newArrayList(); all.add(new Object[] { new TestTimestampDataBasic() }); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessLock.java index da8712826c0..58f4e7514ba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessLock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessLock.java @@ -36,7 +36,7 @@ public interface InterProcessLock { * @throws InterruptedException If current thread is interrupted while * waiting for the lock */ - void acquire() throws IOException, InterruptedException; + public void acquire() throws IOException, InterruptedException; /** * Acquire the lock within a wait time. @@ -50,7 +50,7 @@ public interface InterProcessLock { * @throws InterruptedException If the thread is interrupted while waiting to * acquire the lock */ - boolean tryAcquire(long timeoutMs) + public boolean tryAcquire(long timeoutMs) throws IOException, InterruptedException; /** @@ -59,7 +59,7 @@ public interface InterProcessLock { * @throws InterruptedException If the thread is interrupted while releasing * the lock */ - void release() throws IOException, InterruptedException; + public void release() throws IOException, InterruptedException; /** * If supported, attempts to reap all the locks of this type by forcefully @@ -69,7 +69,7 @@ public interface InterProcessLock { * lock holder is still alive. * @throws IOException If there is an unrecoverable error reaping the locks */ - void reapExpiredLocks(long expireTimeoutMs) throws IOException; + public void reapExpiredLocks(long expireTimeoutMs) throws IOException; /** * If supported, attempts to reap all the locks of this type by forcefully @@ -80,12 +80,12 @@ public interface InterProcessLock { * with timeout=0. * @throws IOException If there is an unrecoverable error reaping the locks */ - void reapAllLocks() throws IOException; + public void reapAllLocks() throws IOException; /** * An interface for objects that process lock metadata. */ - interface MetadataHandler { + public static interface MetadataHandler { /** * Called after lock metadata is successfully read from a distributed @@ -93,7 +93,7 @@ public interface InterProcessLock { * printing the metadata in a humanly-readable format. * @param metadata The metadata */ - void handleMetadata(byte[] metadata); + public void handleMetadata(byte[] metadata); } /** @@ -101,5 +101,5 @@ public interface InterProcessLock { * {@link MetadataHandler}. 
* @throws InterruptedException If there is an unrecoverable error */ - void visitLocks(MetadataHandler handler) throws IOException; + public void visitLocks(MetadataHandler handler) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessReadWriteLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessReadWriteLock.java index ddee0297017..68004bad530 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessReadWriteLock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessReadWriteLock.java @@ -34,7 +34,7 @@ public interface InterProcessReadWriteLock { * which the lock was acquired). * @return An instantiated InterProcessLock instance */ - InterProcessLock readLock(byte[] metadata); + public InterProcessLock readLock(byte[] metadata); /** * Obtain a write lock containing given metadata. @@ -43,5 +43,5 @@ public interface InterProcessReadWriteLock { * which the lock was acquired). * @return An instantiated InterProcessLock instance */ - InterProcessLock writeLock(byte[] metadata); + public InterProcessLock writeLock(byte[] metadata); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java index e087626d0fa..38c223c91e0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java @@ -35,7 +35,7 @@ public interface TableDescriptors { * @return HTableDescriptor for tablename * @throws IOException */ - HTableDescriptor get(final String tablename) + public HTableDescriptor get(final String tablename) throws IOException; /** @@ -43,7 +43,7 @@ public interface TableDescriptors { * @return HTableDescriptor for tablename * @throws IOException */ - HTableDescriptor get(final byte[] tablename) + public HTableDescriptor get(final byte[] tablename) throws IOException; /** @@ -52,7 +52,7 @@ public interface TableDescriptors { * @return Map of all descriptors. * @throws IOException */ - Map getAll() + public Map getAll() throws IOException; /** @@ -60,7 +60,7 @@ public interface TableDescriptors { * @param htd Descriptor to set into TableDescriptors * @throws IOException */ - void add(final HTableDescriptor htd) + public void add(final HTableDescriptor htd) throws IOException; /** @@ -68,6 +68,6 @@ public interface TableDescriptors { * @return Instance of table descriptor or null if none found. * @throws IOException */ - HTableDescriptor remove(final String tablename) + public HTableDescriptor remove(final String tablename) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java index 05bb2cdec50..31628099bd7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java @@ -76,6 +76,6 @@ public interface Constraint extends Configurable { * @throws org.apache.hadoop.hbase.exceptions.ConstraintException when the {@link Put} does not match the * constraint. 
*/ - void check(Put p) throws ConstraintException; + public void check(Put p) throws ConstraintException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorService.java index 2f78e11134f..128a5b947c7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorService.java @@ -29,5 +29,5 @@ import org.apache.hadoop.classification.InterfaceStability; @InterfaceAudience.Public @InterfaceStability.Evolving public interface CoprocessorService { - Service getService(); + public Service getService(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java index c9e6233c5b5..fde38a6bee6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java @@ -31,12 +31,12 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices; @InterfaceStability.Evolving public interface RegionCoprocessorEnvironment extends CoprocessorEnvironment { /** @return the region associated with this coprocessor */ - HRegion getRegion(); + public HRegion getRegion(); /** @return reference to the region server services */ - RegionServerServices getRegionServerServices(); + public RegionServerServices getRegionServerServices(); /** @return shared data between all instances of this coprocessor */ - ConcurrentMap getSharedData(); + public ConcurrentMap getSharedData(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java index da329a982a0..8a2416d797b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java @@ -28,5 +28,5 @@ import org.apache.hadoop.hbase.regionserver.wal.HLog; @InterfaceStability.Evolving public interface WALCoprocessorEnvironment extends CoprocessorEnvironment { /** @return reference to the write ahead log */ - HLog getWAL(); + public HLog getWAL(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionListener.java index d7e24b50b68..014da53fd63 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionListener.java @@ -36,5 +36,5 @@ public interface ForeignExceptionListener { * Implementers must ensure that this method is thread-safe. * @param e exception causing the error. Implementations must accept and handle null here.
*/ - void receive(ForeignException e); -} + public void receive(ForeignException e); +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java index 1e97796d63a..47586ddd43a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java @@ -47,7 +47,7 @@ public interface ForeignExceptionSnare { * @throws ForeignException * all exceptions from remote sources are procedure exceptions */ - void rethrowException() throws ForeignException; + public void rethrowException() throws ForeignException; /** * Non-exceptional form of {@link #rethrowException()}. Checks to see if any * * @return true if there has been an error, false otherwise */ - boolean hasException(); + public boolean hasException(); /** * Get the value of the captured exception. * * @return the captured foreign exception or null if no exception captured. */ - ForeignException getException(); + public ForeignException getException(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java index 559e7e8a2b8..f20cac115cf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java @@ -87,12 +87,12 @@ public abstract class EventHandler implements Runnable, Comparable { * Called before any event is processed * @param event The event handler whose process method is about to be called. */ - void beforeProcess(EventHandler event); + public void beforeProcess(EventHandler event); /** * Called after any event is processed * @param event The event handler whose process method has just been called. */ - void afterProcess(EventHandler event); + public void afterProcess(EventHandler event); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java index 848c88639a8..ece1c4f5db2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java @@ -296,7 +296,7 @@ public class HFileSystem extends FilterFileSystem { /** * Interface to implement to add a specific reordering logic in hdfs.
*/ - interface ReorderBlocks { + static interface ReorderBlocks { /** * * @param conf - the conf to use @@ -304,7 +304,7 @@ public class HFileSystem extends FilterFileSystem { * @param src - the file name currently read * @throws IOException - if something went wrong */ - void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src) throws IOException; + public void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src) throws IOException; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java index f3e27b2cc91..1da99c707a6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java @@ -34,5 +34,5 @@ public interface WritableWithSize { * * @return the size of the writable */ - long getWritableSize(); + public long getWritableSize(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java index df3b6964419..670fb864695 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java @@ -36,14 +36,14 @@ public interface BlockCache { * @param buf The block contents wrapped in a ByteBuffer. * @param inMemory Whether block should be treated as in-memory */ - void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory); + public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory); /** * Add block to cache (defaults to not in-memory). * @param cacheKey The block's cache key. * @param buf The object to cache. */ - void cacheBlock(BlockCacheKey cacheKey, Cacheable buf); + public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf); /** * Fetch block from cache. @@ -54,62 +54,62 @@ public interface BlockCache { * @return Block or null if block is not in the cache. * @see HFileReaderV2#readBlock(long, long, boolean, boolean, boolean, BlockType) */ - Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat); + public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat); /** * Evict block from cache. * @param cacheKey Block to evict * @return true if block existed and was evicted, false if not */ - boolean evictBlock(BlockCacheKey cacheKey); + public boolean evictBlock(BlockCacheKey cacheKey); /** * Evicts all blocks for the given HFile. * * @return the number of blocks evicted */ - int evictBlocksByHfileName(String hfileName); + public int evictBlocksByHfileName(String hfileName); /** * Get the statistics for this block cache. * @return Stats */ - CacheStats getStats(); + public CacheStats getStats(); /** * Shutdown the cache. */ - void shutdown(); + public void shutdown(); /** * Returns the total size of the block cache, in bytes. * @return size of cache, in bytes */ - long size(); + public long size(); /** * Returns the free size of the block cache, in bytes. * @return free space in cache, in bytes */ - long getFreeSize(); + public long getFreeSize(); /** * Returns the occupied size of the block cache, in bytes. * @return occupied space in cache, in bytes */ - long getCurrentSize(); + public long getCurrentSize(); /** * Returns the number of evictions that have occurred.
* @return number of evictions */ - long getEvictedCount(); + public long getEvictedCount(); /** * Returns the number of blocks currently cached in the block cache. * @return number of blocks in the cache */ - long getBlockCount(); + public long getBlockCount(); /** * Performs a BlockCache summary and returns a List of BlockCacheColumnFamilySummary objects. @@ -123,5 +123,5 @@ public interface BlockCache { * @return List of BlockCacheColumnFamilySummary * @throws IOException exception */ - List getBlockCacheColumnFamilySummaries(Configuration conf) throws IOException; + public List getBlockCacheColumnFamilySummaries(Configuration conf) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java index 51451996b76..28886f3f1a7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java @@ -42,23 +42,23 @@ public interface Cacheable extends HeapSize { * @return int length in bytes of the serialized form. */ - int getSerializedLength(); + public int getSerializedLength(); /** * Serializes its data into destination. */ - void serialize(ByteBuffer destination); + public void serialize(ByteBuffer destination); /** * Returns CacheableDeserializer instance which reconstructs original object from ByteBuffer. * * @return CacheableDeserializer instance. */ - CacheableDeserializer getDeserializer(); + public CacheableDeserializer getDeserializer(); /** * @return the block type of this cached HFile block */ - BlockType getBlockType(); + public BlockType getBlockType(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java index b99341c45ec..014a673314c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java @@ -33,7 +33,7 @@ public interface CacheableDeserializer { * * @return T the deserialized object. */ - T deserialize(ByteBuffer b) throws IOException; + public T deserialize(ByteBuffer b) throws IOException; /** * * @param b * @return T the deserialized object. * @throws IOException */ - T deserialize(ByteBuffer b, boolean reuse) throws IOException; + public T deserialize(ByteBuffer b, boolean reuse) throws IOException; /** * Get the identifier of this deserializer. Identifier is unique for each * deserializer and generated by {@link CacheableDeserializerIdManager} * @return identifier number of this cacheable deserializer */ - int getDeserialiserIdentifier(); + public int getDeserialiserIdentifier(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java index 7d2cb77e3ca..1644928014c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java @@ -49,9 +49,8 @@ public interface HFileDataBlockEncoder { * generated). * @return non null block which is coded according to the settings.
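For illustration, a minimal sketch, not part of this patch, of the read-through pattern the BlockCache interface above is built for; the loadFromDisk helper is hypothetical and the actual disk read is elided:

    import org.apache.hadoop.hbase.io.hfile.BlockCache;
    import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
    import org.apache.hadoop.hbase.io.hfile.Cacheable;

    class BlockCacheSketch {
      static Cacheable readThrough(BlockCache cache, BlockCacheKey key) {
        // caching=true asks the cache to retain the block; repeat=false marks
        // this as a first lookup so a miss is counted in the stats.
        Cacheable block = cache.getBlock(key, true, false);
        if (block == null) {
          block = loadFromDisk(key);    // hypothetical loader, elided here
          cache.cacheBlock(key, block); // defaults to not in-memory
        }
        return block;
      }
      private static Cacheable loadFromDisk(BlockCacheKey key) {
        throw new UnsupportedOperationException("disk read elided in this sketch");
      }
    }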
*/ - HFileBlock diskToCacheFormat( - HFileBlock block, boolean isCompaction - ); + public HFileBlock diskToCacheFormat(HFileBlock block, + boolean isCompaction); /** * Should be called before an encoded or unencoded data block is written to @@ -61,39 +60,37 @@ public interface HFileDataBlockEncoder { * @param blockType block type * @throws IOException */ - void beforeWriteToDisk( - ByteBuffer in, - boolean includesMemstoreTS, - HFileBlockEncodingContext encodingResult, - BlockType blockType - ) throws IOException; + public void beforeWriteToDisk( + ByteBuffer in, boolean includesMemstoreTS, + HFileBlockEncodingContext encodingResult, + BlockType blockType) throws IOException; /** * Decides whether we should use a scanner over encoded blocks. * @param isCompaction whether we are in a compaction. * @return Whether to use encoded scanner. */ - boolean useEncodedScanner(boolean isCompaction); + public boolean useEncodedScanner(boolean isCompaction); /** * Save metadata in HFile which will be written to disk * @param writer writer for a given HFile * @exception IOException on disk problems */ - void saveMetadata(HFile.Writer writer) + public void saveMetadata(HFile.Writer writer) throws IOException; /** @return the on-disk data block encoding */ - DataBlockEncoding getEncodingOnDisk(); + public DataBlockEncoding getEncodingOnDisk(); /** @return the preferred in-cache data block encoding for normal reads */ - DataBlockEncoding getEncodingInCache(); + public DataBlockEncoding getEncodingInCache(); /** * @return the effective in-cache data block encoding, taking into account * whether we are doing a compaction. */ - DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction); + public DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction); /** * Create an encoder specific encoding context object for writing. And the * @@ -104,9 +101,8 @@ public interface HFileDataBlockEncoder { * @param headerBytes header bytes * @return a new {@link HFileBlockEncodingContext} object */ - HFileBlockEncodingContext newOnDiskDataBlockEncodingContext( - Algorithm compressionAlgorithm, byte[] headerBytes - ); + public HFileBlockEncodingContext newOnDiskDataBlockEncodingContext( + Algorithm compressionAlgorithm, byte[] headerBytes); /** * create an encoder specific decoding context for reading. And the * @param compressionAlgorithm * @return a new {@link HFileBlockDecodingContext} object */ - HFileBlockDecodingContext newOnDiskDataBlockDecodingContext( - Algorithm compressionAlgorithm - ); + public HFileBlockDecodingContext newOnDiskDataBlockDecodingContext( + Algorithm compressionAlgorithm); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java index 0e353efc641..ffca0c26b6c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java @@ -54,8 +54,8 @@ public interface HFileScanner { * false when it is called. * @throws IOException */ - int seekTo(byte[] key) throws IOException; - int seekTo(byte[] key, int offset, int length) throws IOException; + public int seekTo(byte[] key) throws IOException; + public int seekTo(byte[] key, int offset, int length) throws IOException; /** * Reseek to or just before the passed key.
Similar to seekTo * except that this can be called even if the scanner is not at the beginning * @@ -76,8 +76,8 @@ public interface HFileScanner { * 1, such that k[i] < key, and scanner is left in position i. * @throws IOException */ - int reseekTo(byte[] key) throws IOException; - int reseekTo(byte[] key, int offset, int length) throws IOException; + public int reseekTo(byte[] key) throws IOException; + public int reseekTo(byte[] key, int offset, int length) throws IOException; /** * Consider the key stream of all the keys in the file, * k[0] .. k[n], where there are n keys in the file. * @@ -88,28 +88,28 @@ public interface HFileScanner { * return false (EOF). * @throws IOException */ - boolean seekBefore(byte[] key) throws IOException; - boolean seekBefore(byte[] key, int offset, int length) throws IOException; + public boolean seekBefore(byte [] key) throws IOException; + public boolean seekBefore(byte []key, int offset, int length) throws IOException; /** * Positions this scanner at the start of the file. * @return False if empty file; i.e. a call to next would return false and * the current key and value are undefined. * @throws IOException */ - boolean seekTo() throws IOException; + public boolean seekTo() throws IOException; /** * Scans to the next entry in the file. * @return Returns false if you are at the end, otherwise true if more in file. * @throws IOException */ - boolean next() throws IOException; + public boolean next() throws IOException; /** * Gets a buffer view to the current key. You must call * {@link #seekTo(byte[])} before this method. * @return byte buffer for the key. The limit is set to the key size, and the * position is 0, the start of the buffer view. */ - ByteBuffer getKey(); + public ByteBuffer getKey(); /** * Gets a buffer view to the current value. You must call * {@link #seekTo(byte[])} before this method. * @@ -117,31 +117,31 @@ public interface HFileScanner { * @return byte buffer for the value. The limit is set to the value size, and * the position is 0, the start of the buffer view. */ - ByteBuffer getValue(); + public ByteBuffer getValue(); /** * @return Instance of {@link KeyValue}. */ - KeyValue getKeyValue(); + public KeyValue getKeyValue(); /** * Convenience method to get a copy of the key as a string - interpreting the * bytes as UTF8. You must call {@link #seekTo(byte[])} before this method. * @return key as a string */ - String getKeyString(); + public String getKeyString(); /** * Convenience method to get a copy of the value as a string - interpreting * the bytes as UTF8. You must call {@link #seekTo(byte[])} before this method. * @return value as a string */ - String getValueString(); + public String getValueString(); /** * @return Reader that underlies this Scanner instance. */ - HFile.Reader getReader(); + public HFile.Reader getReader(); /** * @return True if scanner has had one of the seek calls invoked; i.e. * {@link #seekBefore(byte[])} or {@link #seekTo()} or {@link #seekTo(byte[])}. * Otherwise returns false. */ - boolean isSeeked(); + public boolean isSeeked(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/Delayable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/Delayable.java index c6ffe8fb2fd..f527158a1f6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/Delayable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/Delayable.java @@ -35,17 +35,17 @@ public interface Delayable { * should be set when ending the delay or right away.
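As a sketch, not part of this patch, of the seek-then-iterate usage the HFileScanner javadoc above describes; the dumpFrom method and startKey argument are hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.hbase.io.hfile.HFileScanner;

    class HFileScannerSketch {
      static void dumpFrom(HFileScanner scanner, byte[] startKey) throws IOException {
        // seekTo(key) returns -1 (key sorts before the first entry, scanner not
        // positioned), 0 (exact match) or 1 (positioned at the last entry <= key).
        int pos = scanner.seekTo(startKey);
        if (pos == -1 && !scanner.seekTo()) {
          return; // empty file
        }
        do {
          System.out.println(scanner.getKeyString() + " -> " + scanner.getValueString());
        } while (scanner.next());
      }
    }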
There are cases when * the return value can be set right away, even if the call is delayed. */ - void startDelay(boolean delayReturnValue); + public void startDelay(boolean delayReturnValue); /** * @return is the call delayed? */ - boolean isDelayed(); + public boolean isDelayed(); /** * @return is the return value delayed? */ - boolean isReturnValueDelayed(); + public boolean isReturnValueDelayed(); /** * Signal that the RPC server is now allowed to send the response. * @@ -54,14 +54,14 @@ public interface Delayable { * not be delayed, this parameter must be null. * @throws IOException */ - void endDelay(Object result) throws IOException; + public void endDelay(Object result) throws IOException; /** * Signal the end of a delayed RPC, without specifying the return value. Use * this only if the return value was not delayed * @throws IOException */ - void endDelay() throws IOException; + public void endDelay() throws IOException; /** * End the call, throwing an exception to the caller. This works regardless * @param t Object to throw to the client. * @throws IOException */ - void endDelayThrowing(Throwable t) throws IOException; -} + public void endDelayThrowing(Throwable t) throws IOException; +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCErrorHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCErrorHandler.java index cf75b6b5dce..04f88a85204 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCErrorHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCErrorHandler.java @@ -31,5 +31,5 @@ public interface HBaseRPCErrorHandler { * @param e the throwable * @return if the server should be shut down */ - boolean checkOOME(final Throwable e) ; + public boolean checkOOME(final Throwable e) ; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java index 9fe93443dd0..91f09e21571 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java @@ -65,7 +65,7 @@ public interface RpcServerInterface { */ MetricsHBaseServer getMetrics(); - void setQosFunction(Function, Integer> newFunc); + public void setQosFunction(Function, Integer> newFunc); /** * Refresh authentication manager policy. @@ -73,4 +73,4 @@ */ @VisibleForTesting void refreshAuthManager(PolicyProvider pp); -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index c3df1e34f44..b1046bf4d1c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -1145,11 +1145,11 @@ public class AssignmentManager extends ZooKeeperListener { /** * A specific runnable that works only on a region. */ - private interface RegionRunnable extends Runnable{ + private static interface RegionRunnable extends Runnable{ /** * @return - the name of the region it works on.
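A minimal sketch, not part of this patch, of the delayed-RPC handshake the Delayable javadoc above implies; how the parked call reaches a worker thread is elided:

    import java.io.IOException;
    import org.apache.hadoop.hbase.ipc.Delayable;

    class DelayedCallSketch {
      static void park(Delayable call) {
        call.startDelay(true); // true: the return value itself is delayed
        // ... hand the call off to a worker; the RPC response is held back ...
      }
      static void complete(Delayable call, Object result) throws IOException {
        if (call.isReturnValueDelayed()) {
          call.endDelay(result); // release the response with the computed value
        } else {
          call.endDelay();       // return value was not delayed; just release
        }
      }
      static void fail(Delayable call, Throwable t) throws IOException {
        call.endDelayThrowing(t); // works regardless of the delay status
      }
    }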
*/ - String getRegionName(); + public String getRegionName(); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java index e6d96d3081d..d6458c10df9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java @@ -220,14 +220,14 @@ public class ClusterStatusPublisher extends Chore { } - public interface Publisher extends Closeable { + public static interface Publisher extends Closeable { - void connect(Configuration conf) throws IOException; + public void connect(Configuration conf) throws IOException; - void publish(ClusterStatus cs); + public void publish(ClusterStatus cs); @Override - void close(); + public void close(); } public static class MulticastPublisher implements Publisher { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java index b1427c47c72..e5252fee969 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java @@ -50,21 +50,21 @@ public interface LoadBalancer extends Configurable { * Set the current cluster status. This allows a LoadBalancer to map host name to a server * @param st */ - void setClusterStatus(ClusterStatus st); + public void setClusterStatus(ClusterStatus st); /** * Set the master service. * @param masterServices */ - void setMasterServices(MasterServices masterServices); + public void setMasterServices(MasterServices masterServices); /** * Perform the major balance operation * @param clusterState * @return List of plans */ - List balanceCluster(Map> clusterState); + public List balanceCluster(Map> clusterState); /** * Perform a Round Robin assignment of regions. 
@@ -72,10 +72,7 @@ public interface LoadBalancer extends Configurable { * @param servers * @return Map of servername to regioninfos */ - Map> roundRobinAssignment( - List regions, - List servers - ); + public Map> roundRobinAssignment(List regions, List servers); /** * Assign regions to the previously hosting region server @@ -83,10 +80,7 @@ public interface LoadBalancer extends Configurable { * @param servers * @return List of plans */ - Map> retainAssignment( - Map regions, - List servers - ); + public Map> retainAssignment(Map regions, List servers); /** * Sync assign a region @@ -94,10 +88,7 @@ public interface LoadBalancer extends Configurable { * @param servers * @return Map regioninfos to servernames */ - Map immediateAssignment( - List regions, - List servers - ); + public Map immediateAssignment(List regions, List servers); /** * Get a random region server from the list @@ -105,7 +96,6 @@ public interface LoadBalancer extends Configurable { * @param servers * @return Servername */ - ServerName randomAssignment( - HRegionInfo regionInfo, List servers - ); + public ServerName randomAssignment(HRegionInfo regionInfo, + List servers); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 07e0dfad3e1..7e3faf7cc19 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -40,32 +40,32 @@ public interface MasterServices extends Server { /** * @return Master's instance of the {@link AssignmentManager} */ - AssignmentManager getAssignmentManager(); + public AssignmentManager getAssignmentManager(); /** * @return Master's filesystem {@link MasterFileSystem} utility class. */ - MasterFileSystem getMasterFileSystem(); + public MasterFileSystem getMasterFileSystem(); /** * @return Master's {@link ServerManager} instance. */ - ServerManager getServerManager(); + public ServerManager getServerManager(); /** * @return Master's instance of {@link ExecutorService} */ - ExecutorService getExecutorService(); + public ExecutorService getExecutorService(); /** * @return Master's instance of {@link TableLockManager} */ - TableLockManager getTableLockManager(); + public TableLockManager getTableLockManager(); /** * @return Master's instance of {@link MasterCoprocessorHost} */ - MasterCoprocessorHost getCoprocessorHost(); + public MasterCoprocessorHost getCoprocessorHost(); /** * Check table is modifiable; i.e. exists and is offline. @@ -75,7 +75,7 @@ public interface MasterServices extends Server { * @throws IOException */ // We actually throw the exceptions mentioned in the - void checkTableModifiable(final byte[] tableName) + public void checkTableModifiable(final byte [] tableName) throws IOException, TableNotFoundException, TableNotDisabledException; /** @@ -84,7 +84,7 @@ public interface MasterServices extends Server { * @param splitKeys Starting row keys for the initial table regions. If null * a single region is created. 
*/ - void createTable(HTableDescriptor desc, byte[][] splitKeys) + public void createTable(HTableDescriptor desc, byte [][] splitKeys) throws IOException; /** @@ -92,7 +92,7 @@ public interface MasterServices extends Server { * @param tableName The table name * @throws IOException */ - void deleteTable(final byte[] tableName) throws IOException; + public void deleteTable(final byte[] tableName) throws IOException; /** * Modify the descriptor of an existing table @@ -100,7 +100,7 @@ public interface MasterServices extends Server { * @param descriptor The updated table descriptor * @throws IOException */ - void modifyTable(final byte[] tableName, final HTableDescriptor descriptor) + public void modifyTable(final byte[] tableName, final HTableDescriptor descriptor) throws IOException; /** @@ -108,14 +108,14 @@ public interface MasterServices extends Server { * @param tableName The table name * @throws IOException */ - void enableTable(final byte[] tableName) throws IOException; + public void enableTable(final byte[] tableName) throws IOException; /** * Disable an existing table * @param tableName The table name * @throws IOException */ - void disableTable(final byte[] tableName) throws IOException; + public void disableTable(final byte[] tableName) throws IOException; /** * Add a new column to an existing table @@ -123,7 +123,7 @@ public interface MasterServices extends Server { * @param column The column definition * @throws IOException */ - void addColumn(final byte[] tableName, final HColumnDescriptor column) + public void addColumn(final byte[] tableName, final HColumnDescriptor column) throws IOException; /** @@ -132,7 +132,7 @@ public interface MasterServices extends Server { * @param descriptor The updated column definition * @throws IOException */ - void modifyColumn(byte[] tableName, HColumnDescriptor descriptor) + public void modifyColumn(byte[] tableName, HColumnDescriptor descriptor) throws IOException; /** @@ -141,18 +141,18 @@ public interface MasterServices extends Server { * @param columnName The column name * @throws IOException */ - void deleteColumn(final byte[] tableName, final byte[] columnName) + public void deleteColumn(final byte[] tableName, final byte[] columnName) throws IOException; /** * @return Return table descriptors implementation. */ - TableDescriptors getTableDescriptors(); + public TableDescriptors getTableDescriptors(); /** * @return true if master enables ServerShutdownHandler; */ - boolean isServerShutdownHandlerEnabled(); + public boolean isServerShutdownHandlerEnabled(); /** * Registers a new protocol buffer {@link Service} subclass as a master coprocessor endpoint. @@ -167,7 +167,7 @@ public interface MasterServices extends Server { * @return {@code true} if the registration was successful, {@code false} * otherwise */ - boolean registerService(Service instance); + public boolean registerService(Service instance); /** * Merge two regions. 
The real implementation is on the regionserver, master @@ -178,13 +178,12 @@ public interface MasterServices extends Server { * two adjacent regions * @throws IOException */ - void dispatchMergingRegions( - final HRegionInfo region_a, final HRegionInfo region_b, final boolean forcible - ) throws IOException; + public void dispatchMergingRegions(final HRegionInfo region_a, + final HRegionInfo region_b, final boolean forcible) throws IOException; /** * @return true if master is initialized */ - boolean isInitialized(); + public boolean isInitialized(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java index 0f703f4561d..d621d7724d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java @@ -34,30 +34,30 @@ public interface SnapshotSentinel { * @return false if the snapshot is still in progress, true if the snapshot has * finished */ - boolean isFinished(); + public boolean isFinished(); /** * @return -1 if the snapshot is in progress, otherwise the completion timestamp. */ - long getCompletionTimestamp(); + public long getCompletionTimestamp(); /** * Actively cancel a running snapshot. * @param why Reason for cancellation. */ - void cancel(String why); + public void cancel(String why); /** * @return the description of the snapshot being run */ - SnapshotDescription getSnapshot(); + public SnapshotDescription getSnapshot(); /** * Get the exception that caused the snapshot to fail, if the snapshot has failed. * @return {@link ForeignException} that caused the snapshot to fail, or null if the * snapshot is still in progress or has succeeded */ - ForeignException getExceptionIfFailed(); + public ForeignException getExceptionIfFailed(); /** * Rethrow the exception returned by {@link SnapshotSentinel#getExceptionIfFailed}. @@ -65,5 +65,5 @@ public interface SnapshotSentinel { * * @throws ForeignException all exceptions from remote sources are procedure exceptions */ - void rethrowExceptionIfFailed() throws ForeignException; + public void rethrowExceptionIfFailed() throws ForeignException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java index 2dd36122e63..d23e20eaca3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java @@ -1593,11 +1593,11 @@ public class SplitLogManager extends ZooKeeperListener { * a serialization point at the end of the task processing. Must be * restartable and idempotent. 
*/ - public interface TaskFinisher { + static public interface TaskFinisher { /** * status that can be returned by finish() */ - enum Status { + static public enum Status { /** * task completed successfully */ @@ -1616,7 +1616,7 @@ public class SplitLogManager extends ZooKeeperListener { * @param taskname * @return DONE if task completed successfully, ERR otherwise */ - Status finish(ServerName workerName, String taskname); + public Status finish(ServerName workerName, String taskname); } enum ResubmitDirective { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java index a616b928598..4da993ed953 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java @@ -82,20 +82,20 @@ public abstract class TableLockManager { * A distributed lock for a table. */ @InterfaceAudience.Private - public interface TableLock { + public static interface TableLock { /** * Acquire the lock, with the configured lock timeout. * @throws LockTimeoutException If unable to acquire a lock within a specified * time period (if any) * @throws IOException If unrecoverable error occurs */ - void acquire() throws IOException; + public void acquire() throws IOException; /** * Release the lock already held. * @throws IOException If there is an unrecoverable error releasing the lock */ - void release() throws IOException; + public void release() throws IOException; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/FileCleanerDelegate.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/FileCleanerDelegate.java index e46e407fde2..c03278195d7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/FileCleanerDelegate.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/FileCleanerDelegate.java @@ -35,5 +35,5 @@ public interface FileCleanerDelegate extends Configurable, Stoppable { * @param fStat file status of the file to check * @return true if the file is deletable, false if not */ - boolean isFileDeletable(FileStatus fStat); + public boolean isFileDeletable(FileStatus fStat); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TotesHRegionInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TotesHRegionInfo.java index 53284ec11d1..6a3ca25b9ed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TotesHRegionInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TotesHRegionInfo.java @@ -33,5 +33,5 @@ public interface TotesHRegionInfo { /** * @return HRegionInfo instance.
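For illustration, a minimal sketch, not part of this patch, of the acquire/release discipline the TableLock javadoc above calls for; obtaining the lock from a TableLockManager is elided:

    import java.io.IOException;
    import org.apache.hadoop.hbase.master.TableLockManager.TableLock;

    class TableLockSketch {
      static void withTableLock(TableLock lock) throws IOException {
        lock.acquire(); // may throw LockTimeoutException or another IOException
        try {
          // ... perform the table operation under the distributed lock ...
        } finally {
          lock.release(); // always release, even if the operation failed
        }
      }
    }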
*/ - HRegionInfo getHRegionInfo(); + public HRegionInfo getHRegionInfo(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandler.java index cbef929e352..031943c6714 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandler.java @@ -29,16 +29,16 @@ import com.google.protobuf.Message; */ @InterfaceAudience.Private public interface MonitoredRPCHandler extends MonitoredTask { - String getRPC(); - String getRPC(boolean withParams); - long getRPCPacketLength(); - String getClient(); - long getRPCStartTime(); - long getRPCQueueTime(); - boolean isRPCRunning(); - boolean isOperationRunning(); + public abstract String getRPC(); + public abstract String getRPC(boolean withParams); + public abstract long getRPCPacketLength(); + public abstract String getClient(); + public abstract long getRPCStartTime(); + public abstract long getRPCQueueTime(); + public abstract boolean isRPCRunning(); + public abstract boolean isOperationRunning(); - void setRPC(String methodName, Object[] params, long queueTime); - void setRPCPacket(Message param); - void setConnection(String clientAddress, int remotePort); + public abstract void setRPC(String methodName, Object [] params, long queueTime); + public abstract void setRPCPacket(Message param); + public abstract void setConnection(String clientAddress, int remotePort); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java index 01a87228e23..a1618b9704b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java @@ -32,47 +32,47 @@ public interface MonitoredTask extends Cloneable { ABORTED; } - long getStartTime(); - String getDescription(); - String getStatus(); - long getStatusTime(); - State getState(); - long getStateTime(); - long getCompletionTimestamp(); + public abstract long getStartTime(); + public abstract String getDescription(); + public abstract String getStatus(); + public abstract long getStatusTime(); + public abstract State getState(); + public abstract long getStateTime(); + public abstract long getCompletionTimestamp(); - void markComplete(String msg); - void pause(String msg); - void resume(String msg); - void abort(String msg); - void expireNow(); + public abstract void markComplete(String msg); + public abstract void pause(String msg); + public abstract void resume(String msg); + public abstract void abort(String msg); + public abstract void expireNow(); - void setStatus(String status); - void setDescription(String description); + public abstract void setStatus(String status); + public abstract void setDescription(String description); /** * Explicitly mark this status as able to be cleaned up, * even though it might not be complete. */ - void cleanup(); + public abstract void cleanup(); /** * Public exposure of Object.clone() in order to allow clients to easily * capture current state. * @return a copy of the object whose references will not change */ - MonitoredTask clone(); + public abstract MonitoredTask clone(); /** * Creates a string map of internal details for extensible exposure of * monitored tasks. 
* @return A Map containing information for this task. */ - Map toMap() throws IOException; + public abstract Map toMap() throws IOException; /** * Creates a JSON object for parseable exposure of monitored tasks. * @return An encoded JSON object containing information for this task. */ - String toJSON() throws IOException; + public abstract String toJSON() throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinatorRpcs.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinatorRpcs.java index dff892c998a..209c67107f3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinatorRpcs.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinatorRpcs.java @@ -39,7 +39,7 @@ public interface ProcedureCoordinatorRpcs extends Closeable { * @param listener * @return true if succeed, false if encountered initialization errors. */ - boolean start(final ProcedureCoordinator listener); + public boolean start(final ProcedureCoordinator listener); /** * Notify the members that the coordinator has aborted the procedure and that it should release @@ -50,7 +50,7 @@ public interface ProcedureCoordinatorRpcs extends Closeable { * @throws IOException if the rpcs can't reach the other members of the procedure (and can't * recover). */ - void sendAbortToMembers(Procedure procName, ForeignException cause) throws IOException; + public void sendAbortToMembers(Procedure procName, ForeignException cause) throws IOException; /** * Notify the members to acquire barrier for the procedure @@ -61,7 +61,7 @@ public interface ProcedureCoordinatorRpcs extends Closeable { * @throws IllegalArgumentException if the procedure was already marked as failed * @throws IOException if we can't reach the remote notification mechanism */ - void sendGlobalBarrierAcquire(Procedure procName, byte[] info, List members) + public void sendGlobalBarrierAcquire(Procedure procName, byte[] info, List members) throws IOException, IllegalArgumentException; /** @@ -74,12 +74,12 @@ public interface ProcedureCoordinatorRpcs extends Closeable { * @param members members to tell we have reached in-barrier phase * @throws IOException if we can't reach the remote notification mechanism */ - void sendGlobalBarrierReached(Procedure procName, List members) throws IOException; + public void sendGlobalBarrierReached(Procedure procName, List members) throws IOException; /** * Notify Members to reset the distributed state for procedure * @param procName name of the procedure to reset * @throws IOException if the remote notification mechanism cannot be reached */ - void resetMembers(Procedure procName) throws IOException; + public void resetMembers(Procedure procName) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMemberRpcs.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMemberRpcs.java index 7264865b42c..9dc95a12bcd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMemberRpcs.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMemberRpcs.java @@ -35,13 +35,13 @@ public interface ProcedureMemberRpcs extends Closeable { /** * Initialize and start any threads or connections the member needs. 
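A sketch, not part of this patch, of the two-barrier message order a coordinator might drive through the ProcedureCoordinatorRpcs interface above; the waits between phases are elided and the ForeignException constructor used here is an assumption:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.errorhandling.ForeignException;
    import org.apache.hadoop.hbase.procedure.Procedure;
    import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs;

    class CoordinatorSketch {
      static void runBarriers(ProcedureCoordinatorRpcs rpcs, Procedure proc,
          byte[] info, List<String> members) throws IOException {
        try {
          rpcs.sendGlobalBarrierAcquire(proc, info, members); // phase 1: members prepare
          // ... wait until every member reports it has acquired its barrier ...
          rpcs.sendGlobalBarrierReached(proc, members);       // phase 2: members do the work
          // ... wait for completion ...
        } catch (IOException e) {
          rpcs.sendAbortToMembers(proc, new ForeignException("coordinator", e));
          throw e;
        } finally {
          rpcs.resetMembers(proc); // clear the distributed state for this procedure
        }
      }
    }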
*/ - void start(final String memberName, final ProcedureMember member); + public void start(final String memberName, final ProcedureMember member); /** * Each subprocedure is being executed on a member. This is the identifier for the member. * @return the member name */ - String getMemberName(); + public String getMemberName(); /** * Notify the coordinator that we aborted the specified {@link Subprocedure} @@ -51,7 +51,7 @@ public interface ProcedureMemberRpcs extends Closeable { * @throws IOException thrown when the rpcs can't reach the other members of the procedure (and * thus can't recover). */ - void sendMemberAborted(Subprocedure sub, ForeignException cause) throws IOException; + public void sendMemberAborted(Subprocedure sub, ForeignException cause) throws IOException; /** * Notify the coordinator that the specified {@link Subprocedure} has acquired the locally required @@ -60,7 +60,7 @@ public interface ProcedureMemberRpcs extends Closeable { * @param sub the specified {@link Subprocedure} * @throws IOException if we can't reach the coordinator */ - void sendMemberAcquired(Subprocedure sub) throws IOException; + public void sendMemberAcquired(Subprocedure sub) throws IOException; /** * Notify the coordinator that the specified {@link Subprocedure} has completed the work that @@ -69,5 +69,5 @@ public interface ProcedureMemberRpcs extends Closeable { * @param sub the specified {@link Subprocedure} * @throws IOException if we can't reach the coordinator */ - void sendMemberCompleted(Subprocedure sub) throws IOException; -} + public void sendMemberCompleted(Subprocedure sub) throws IOException; +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/SubprocedureFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/SubprocedureFactory.java index ccabc801f73..0b94c89daed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/SubprocedureFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/SubprocedureFactory.java @@ -36,5 +36,5 @@ public interface SubprocedureFactory { * request * @throws IllegalStateException if the current runner cannot accept any more new requests */ - Subprocedure buildSubprocedure(String procName, byte[] procArgs); + public Subprocedure buildSubprocedure(String procName, byte[] procArgs); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java index 67ad725fa3d..d9f8e8986d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java @@ -56,26 +56,25 @@ public interface ColumnTracker { * @throws IOException in case there is an internal consistency problem * caused by a data corruption. */ - ScanQueryMatcher.MatchCode checkColumn( - byte[] bytes, int offset, int length, long ttl, byte type, boolean ignoreCount - ) + public ScanQueryMatcher.MatchCode checkColumn(byte[] bytes, int offset, + int length, long ttl, byte type, boolean ignoreCount) throws IOException; /** * Updates internal variables in between files */ - void update(); + public void update(); /** * Resets the Matcher */ - void reset(); + public void reset(); /** * * @return true when done. 
*/ - boolean done(); + public boolean done(); /** * Used by matcher and scan/get to get a hint of the next column @@ -88,14 +87,13 @@ public interface ColumnTracker { * * @return null, or a ColumnCount that we should seek to */ - ColumnCount getColumnHint(); + public ColumnCount getColumnHint(); /** * Retrieve the MatchCode for the next row or column */ - MatchCode getNextRowOrNextColumn( - byte[] bytes, int offset, int qualLength - ); + public MatchCode getNextRowOrNextColumn(byte[] bytes, int offset, + int qualLength); /** * Give the tracker a chance to declare it's done based on only the timestamp @@ -104,5 +102,5 @@ public interface ColumnTracker { * @param timestamp * @return true to early out based on timestamp. */ - boolean isDone(long timestamp); + public boolean isDone(long timestamp); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java index 531f25c4879..d7a3da5f6bf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java @@ -34,7 +34,7 @@ public interface CompactionRequestor { * compactions were started * @throws IOException */ - List requestCompaction(final HRegion r, final String why) + public List requestCompaction(final HRegion r, final String why) throws IOException; /** @@ -47,9 +47,8 @@ public interface CompactionRequestor { * compactions were started * @throws IOException */ - List requestCompaction( - final HRegion r, final String why, List> requests - ) + public List requestCompaction(final HRegion r, final String why, + List> requests) throws IOException; /** @@ -61,9 +60,8 @@ public interface CompactionRequestor { * @return The created {@link CompactionRequest} or null if no compaction was started. * @throws IOException */ - CompactionRequest requestCompaction( - final HRegion r, final Store s, final String why, CompactionRequest request - ) throws IOException; + public CompactionRequest requestCompaction(final HRegion r, final Store s, final String why, + CompactionRequest request) throws IOException; /** * @param r Region to compact @@ -76,9 +74,8 @@ public interface CompactionRequestor { * compactions were started. 
* @throws IOException */ - List requestCompaction( - final HRegion r, final String why, int pri, List> requests - ) throws IOException; + public List requestCompaction(final HRegion r, final String why, int pri, + List> requests) throws IOException; /** * @param r Region to compact * @param s Store within region to compact * @param why Why compaction was requested * @param pri Priority of this compaction. minHeap. <=0 is critical * @param request custom compaction request. can be null * @return The created {@link CompactionRequest} or null if no compaction was started * @throws IOException */ - CompactionRequest requestCompaction( - final HRegion r, final Store s, final String why, int pri, CompactionRequest request - ) throws IOException; + public CompactionRequest requestCompaction(final HRegion r, final Store s, final String why, + int pri, CompactionRequest request) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java index 430e3dda24e..d4b8934ec35 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java @@ -43,9 +43,8 @@ public interface DeleteTracker { * @param timestamp timestamp * @param type delete type as byte */ - void add( - byte[] buffer, int qualifierOffset, int qualifierLength, long timestamp, byte type - ); + public void add(byte [] buffer, int qualifierOffset, int qualifierLength, + long timestamp, byte type); /** * Check if the specified KeyValue buffer has been deleted by a previously * @@ -56,14 +55,13 @@ public interface DeleteTracker { * @param timestamp timestamp * @return deleteResult The result tells whether the KeyValue is deleted and why */ - DeleteResult isDeleted( - byte[] buffer, int qualifierOffset, int qualifierLength, long timestamp - ); + public DeleteResult isDeleted(byte [] buffer, int qualifierOffset, + int qualifierLength, long timestamp); /** * @return true if there are no current deletes, false otherwise */ - boolean isEmpty(); + public boolean isEmpty(); /** * Called at the end of every StoreFile. * * Many optimized implementations of Trackers will require an update when * the end of each StoreFile is reached. */ - void update(); + public void update(); /** * Called between rows. *

* This clears everything as if a new DeleteTracker was instantiated. */ - void reset(); + public void reset(); /** @@ -104,7 +102,7 @@ public interface DeleteTracker { * Based on the delete result, the ScanQueryMatcher will decide the next * operation */ - enum DeleteResult { + public static enum DeleteResult { FAMILY_DELETED, // The KeyValue is deleted by a delete family. FAMILY_VERSION_DELETED, // The KeyValue is deleted by a delete family version. COLUMN_DELETED, // The KeyValue is deleted by a delete column. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index cb74b28e019..7f21e3a8dd3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -5602,7 +5602,7 @@ public class HRegion implements HeapSize { // , Writable{ * bulkLoadHFile() to perform any necessary * pre/post processing of a given bulkload call */ - public interface BulkLoadListener { + public static interface BulkLoadListener { /** * Called before an HFile is actually loaded diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java index e8abdbbc9b2..e4be0a763d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java @@ -47,7 +47,7 @@ public interface InternalScanner extends Closeable { * @return true if more rows exist after this one, false if scanner is done * @throws IOException e */ - boolean next(List results) throws IOException; + public boolean next(List results) throws IOException; /** * Grab the next row's worth of values with a limit on the number of values @@ -57,11 +57,11 @@ public interface InternalScanner extends Closeable { * @return true if more rows exist after this one, false if scanner is done * @throws IOException e */ - boolean next(List result, int limit) throws IOException; + public boolean next(List result, int limit) throws IOException; /** * Closes the scanner and releases any resources it has allocated * @throws IOException */ - void close() throws IOException; + public void close() throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java index e76dd1f8a13..bc8d487c070 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java @@ -34,20 +34,20 @@ public interface KeyValueScanner { * Look at the next KeyValue in this scanner, but do not iterate scanner. * @return the next KeyValue */ - KeyValue peek(); + public KeyValue peek(); /** * Return the next KeyValue in this scanner, iterating the scanner * @return the next KeyValue */ - KeyValue next() throws IOException; + public KeyValue next() throws IOException; /** * Seek the scanner at or after the specified KeyValue. * @param key seek value * @return true if scanner has values left, false if end of scanner */ - boolean seek(KeyValue key) throws IOException; + public boolean seek(KeyValue key) throws IOException; /** * Reseek the scanner at or after the specified KeyValue. 
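As a sketch, not part of this patch, of the row-at-a-time drain loop the InternalScanner interface above supports; the list element type is left raw here because it varies across HBase versions:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.regionserver.InternalScanner;

    class InternalScannerSketch {
      @SuppressWarnings({"rawtypes", "unchecked"})
      static int countRows(InternalScanner scanner) throws IOException {
        List results = new ArrayList();
        int rows = 0;
        boolean more;
        do {
          results.clear();
          more = scanner.next(results); // fills one row's worth of values
          if (!results.isEmpty()) {
            rows++;
          }
        } while (more);
        scanner.close();
        return rows;
      }
    }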
@@ -57,7 +57,7 @@ public interface KeyValueScanner { * @param key seek value (should be non-null) * @return true if scanner has values left, false if end of scanner */ - boolean reseek(KeyValue key) throws IOException; + public boolean reseek(KeyValue key) throws IOException; /** * Get the sequence id associated with this KeyValueScanner. This is required @@ -65,12 +65,12 @@ public interface KeyValueScanner { * The default implementation for this would be to return 0. A file having * lower sequence id will be considered to be the older one. */ - long getSequenceID(); + public long getSequenceID(); /** * Close the KeyValue scanner. */ - void close(); + public void close(); /** * Allows to filter out scanners (both StoreFile and memstore) that we don't @@ -82,9 +82,8 @@ public interface KeyValueScanner { * this query, based on TTL * @return true if the scanner should be included in the query */ - boolean shouldUseScanner( - Scan scan, SortedSet columns, long oldestUnexpiredTS - ); + public boolean shouldUseScanner(Scan scan, SortedSet columns, + long oldestUnexpiredTS); // "Lazy scanner" optimizations @@ -98,7 +97,7 @@ public interface KeyValueScanner { * @param forward do a forward-only "reseek" instead of a random-access seek * @param useBloom whether to enable multi-column Bloom filter optimization */ - boolean requestSeek(KeyValue kv, boolean forward, boolean useBloom) + public boolean requestSeek(KeyValue kv, boolean forward, boolean useBloom) throws IOException; /** @@ -107,7 +106,7 @@ public interface KeyValueScanner { * store scanner bubbles up to the top of the key-value heap. This method is * then used to ensure the top store file scanner has done a seek operation. */ - boolean realSeekDone(); + public boolean realSeekDone(); /** * Does the real seek operation in case it was skipped by @@ -116,11 +115,11 @@ public interface KeyValueScanner { * of the scanners). The easiest way to achieve this is to call * {@link #realSeekDone()} first. */ - void enforceSeek() throws IOException; + public void enforceSeek() throws IOException; /** * @return true if this is a file scanner. Otherwise a memory scanner is * assumed. */ - boolean isFileScanner(); + public boolean isFileScanner(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LastSequenceId.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LastSequenceId.java index e013665839b..4fb8db43185 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LastSequenceId.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LastSequenceId.java @@ -29,5 +29,5 @@ public interface LastSequenceId { * @param regionName Encoded region name * @return Last flushed sequence Id for regionName or -1 if it can't be determined */ - long getLastSequenceId(byte[] regionName); + public long getLastSequenceId(byte[] regionName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseListener.java index 2e4e00e8b5a..05c6e60c54a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseListener.java @@ -32,5 +32,5 @@ import org.apache.hadoop.classification.InterfaceAudience; @InterfaceAudience.Private public interface LeaseListener { /** When a lease expires, this method is called. 
*/ - void leaseExpired(); + public void leaseExpired(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java index 52dcda01661..244a84b349d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java @@ -35,7 +35,7 @@ interface OnlineRegions extends Server { * Add to online regions. * @param r */ - void addToOnlineRegions(final HRegion r); + public void addToOnlineRegions(final HRegion r); /** * This method removes HRegion corresponding to hri from the Map of onlineRegions. @@ -44,7 +44,7 @@ interface OnlineRegions extends Server { * @param destination Destination, if any, null otherwise. * @return True if we removed a region from online list. */ - boolean removeFromOnlineRegions(final HRegion r, ServerName destination); + public boolean removeFromOnlineRegions(final HRegion r, ServerName destination); /** * Return {@link HRegion} instance. @@ -54,7 +54,7 @@ interface OnlineRegions extends Server { * @return HRegion for the passed encoded encodedRegionName or * null if named region is not member of the online regions. */ - HRegion getFromOnlineRegions(String encodedRegionName); + public HRegion getFromOnlineRegions(String encodedRegionName); /** * Get all online regions of a table in this RS. @@ -62,5 +62,5 @@ interface OnlineRegions extends Server { * @return List of HRegion * @throws java.io.IOException */ - List getOnlineRegions(byte[] tableName) throws IOException; + public List getOnlineRegions(byte[] tableName) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java index 4a5e584fae6..e47047639cd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java @@ -34,13 +34,13 @@ public interface RegionScanner extends InternalScanner { /** * @return The RegionInfo for this scanner. */ - HRegionInfo getRegionInfo(); + public HRegionInfo getRegionInfo(); /** * @return True if a filter indicates that this scanner will return no further rows. * @throws IOException in case of I/O failure on a filter. */ - boolean isFilterDone() throws IOException; + public boolean isFilterDone() throws IOException; /** * Do a reseek to the required row. Should not be used to seek to a key which @@ -52,17 +52,17 @@ public interface RegionScanner extends InternalScanner { * if row is null * */ - boolean reseek(byte[] row) throws IOException; + public boolean reseek(byte[] row) throws IOException; /** * @return The preferred max buffersize. 
See {@link Scan#setMaxResultSize(long)} */ - long getMaxResultSize(); + public long getMaxResultSize(); /** * @return The Scanner's MVCC readPt see {@link MultiVersionConsistencyControl} */ - long getMvccReadPoint(); + public long getMvccReadPoint(); /** * Grab the next row's worth of values with the default limit on the number of values @@ -74,7 +74,7 @@ public interface RegionScanner extends InternalScanner { * @return true if more rows exist after this one, false if scanner is done * @throws IOException e */ - boolean nextRaw(List result) throws IOException; + public boolean nextRaw(List result) throws IOException; /** * Grab the next row's worth of values with a limit on the number of values @@ -102,5 +102,5 @@ public interface RegionScanner extends InternalScanner { * @return true if more rows exist after this one, false if scanner is done * @throws IOException e */ - boolean nextRaw(List result, int limit) throws IOException; + public boolean nextRaw(List result, int limit) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java index a9fa5cc231d..03f41394d38 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java @@ -40,31 +40,31 @@ public interface RegionServerServices extends OnlineRegions, FavoredNodesForRegi /** * @return True if this regionserver is stopping. */ - boolean isStopping(); + public boolean isStopping(); /** @return the HLog for a particular region. Pass null for getting the * default (common) WAL */ - HLog getWAL(HRegionInfo regionInfo) throws IOException; + public HLog getWAL(HRegionInfo regionInfo) throws IOException; /** * @return Implementation of {@link CompactionRequestor} or null. */ - CompactionRequestor getCompactionRequester(); + public CompactionRequestor getCompactionRequester(); /** * @return Implementation of {@link FlushRequester} or null. 
*/ - FlushRequester getFlushRequester(); + public FlushRequester getFlushRequester(); /** * @return the RegionServerAccounting for this Region Server */ - RegionServerAccounting getRegionServerAccounting(); + public RegionServerAccounting getRegionServerAccounting(); /** * @return RegionServer's instance of {@link TableLockManager} */ - TableLockManager getTableLockManager(); + public TableLockManager getTableLockManager(); /** * Tasks to perform after region open to complete deploy of region on @@ -75,42 +75,42 @@ public interface RegionServerServices extends OnlineRegions, FavoredNodesForRegi * @throws KeeperException * @throws IOException */ - void postOpenDeployTasks(final HRegion r, final CatalogTracker ct) + public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct) throws KeeperException, IOException; /** * Returns a reference to the region server's RPC server */ - RpcServerInterface getRpcServer(); + public RpcServerInterface getRpcServer(); /** * Get the regions that are currently being opened or closed in the RS * @return map of regions in transition in this RS */ - ConcurrentMap getRegionsInTransitionInRS(); + public ConcurrentMap getRegionsInTransitionInRS(); /** * @return Return the FileSystem object used by the regionserver */ - FileSystem getFileSystem(); + public FileSystem getFileSystem(); /** * @return The RegionServer's "Leases" service */ - Leases getLeases(); + public Leases getLeases(); /** * @return hbase executor service */ - ExecutorService getExecutorService(); + public ExecutorService getExecutorService(); /** * @return The RegionServer's CatalogTracker */ - CatalogTracker getCatalogTracker(); + public CatalogTracker getCatalogTracker(); /** * @return set of recovering regions on the hosting region server */ - Map getRecoveringRegions(); + public Map getRecoveringRegions(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java index cef7b4630c1..9f3e20ab70b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java @@ -38,18 +38,17 @@ public interface ReplicationService { * Initializes the replication service object. * @throws IOException */ - void initialize( - Server rs, FileSystem fs, Path logdir, Path oldLogDir - ) throws IOException; + public void initialize(Server rs, FileSystem fs, Path logdir, + Path oldLogDir) throws IOException; /** * Start replication services. * @throws IOException */ - void startReplicationService() throws IOException; + public void startReplicationService() throws IOException; /** * Stops replication service. 
*/ - void stopReplicationService(); + public void stopReplicationService(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java index 893c8b62990..28573bd0b35 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java @@ -38,5 +38,5 @@ public interface ReplicationSinkService extends ReplicationService { * @param cells Cells that the WALEntries refer to (if cells is non-null) * @throws IOException */ - void replicateLogEntries(List entries, CellScanner cells) throws IOException; -} + public void replicateLogEntries(List entries, CellScanner cells) throws IOException; +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSourceService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSourceService.java index ac8e59ed39b..edc5c6ad32a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSourceService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSourceService.java @@ -32,5 +32,5 @@ public interface ReplicationSourceService extends ReplicationService { * Returns a WALObserver for the service. This is needed to * observe log rolls and log archival events. */ - WALActionsListener getWALActionsListener(); -} + public WALActionsListener getWALActionsListener(); +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java index 8ec37357aea..5abca00455d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java @@ -640,13 +640,13 @@ public class SplitLogWorker extends ZooKeeperListener implements Runnable { * is better to have workers prepare the task and then have the * {@link SplitLogManager} commit the work in SplitLogManager.TaskFinisher */ - public interface TaskExecutor { - enum Status { + static public interface TaskExecutor { + static public enum Status { DONE(), ERR(), RESIGNED(), PREEMPTED() } - Status exec(String name, CancelableProgressable p); + public Status exec(String name, CancelableProgressable p); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java index 79454fd49b8..1f2017c5b6f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -50,13 +50,14 @@ public interface Store extends HeapSize, StoreConfigInformation { /* The default priority for user-specified compaction requests. * The user gets top priority unless we have blocking compactions. 
(Pri <= 0) - */ int PRIORITY_USER = 1; - int NO_PRIORITY = Integer.MIN_VALUE; + */ + public static final int PRIORITY_USER = 1; + public static final int NO_PRIORITY = Integer.MIN_VALUE; // General Accessors - KeyValue.KVComparator getComparator(); + public KeyValue.KVComparator getComparator(); - Collection getStorefiles(); + public Collection getStorefiles(); /** * Close all the readers We don't need to worry about subsequent requests because the HRegion @@ -64,7 +65,7 @@ public interface Store extends HeapSize, StoreConfigInformation { * @return the {@link StoreFile StoreFiles} that were previously being used. * @throws IOException on failure */ - Collection close() throws IOException; + public Collection close() throws IOException; /** * Return a scanner for both the memstore and the HStore files. Assumes we are not in a @@ -74,7 +75,7 @@ public interface Store extends HeapSize, StoreConfigInformation { * @return a scanner over the current key values * @throws IOException on failure */ - KeyValueScanner getScanner(Scan scan, final NavigableSet targetCols) + public KeyValueScanner getScanner(Scan scan, final NavigableSet targetCols) throws IOException; /** @@ -88,16 +89,11 @@ public interface Store extends HeapSize, StoreConfigInformation { * @param stopRow * @return all scanners for this store */ - List getScanners( - boolean cacheBlocks, - boolean isGet, - boolean isCompaction, - ScanQueryMatcher matcher, - byte[] startRow, - byte[] stopRow - ) throws IOException; + public List getScanners(boolean cacheBlocks, + boolean isGet, boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow, + byte[] stopRow) throws IOException; - ScanInfo getScanInfo(); + public ScanInfo getScanInfo(); /** * Adds or replaces the specified KeyValues. @@ -112,14 +108,14 @@ public interface Store extends HeapSize, StoreConfigInformation { * @return memstore size delta * @throws IOException */ - long upsert(Iterable cells, long readpoint) throws IOException; + public long upsert(Iterable cells, long readpoint) throws IOException; /** * Adds a value to the memstore * @param kv * @return memstore size delta */ - long add(KeyValue kv); + public long add(KeyValue kv); /** * When was the last edit done in the memstore @@ -131,7 +127,7 @@ public interface Store extends HeapSize, StoreConfigInformation { * key & memstoreTS value of the kv parameter. * @param kv */ - void rollback(final KeyValue kv); + public void rollback(final KeyValue kv); /** * Find the key that matches row exactly, or the one that immediately precedes it. WARNING: @@ -145,9 +141,9 @@ public interface Store extends HeapSize, StoreConfigInformation { * @return Found keyvalue or null if none found. * @throws IOException */ - KeyValue getRowKeyAtOrBefore(final byte[] row) throws IOException; + public KeyValue getRowKeyAtOrBefore(final byte[] row) throws IOException; - FileSystem getFileSystem(); + public FileSystem getFileSystem(); /* * @param maxKeyCount @@ -156,48 +152,44 @@ public interface Store extends HeapSize, StoreConfigInformation { * @param includeMVCCReadpoint whether we should out the MVCC readpoint * @return Writer for a new StoreFile in the tmp dir. 
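A rough usage sketch for the memstore-facing methods restored above (add returns the memstore size delta, per the javadoc). It assumes a Store instance obtained from a live HRegion; the family and qualifier bytes are invented for illustration:

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreWriteSketch {
  // Adds one cell to the memstore and reports how much the memstore grew.
  static long writeCell(Store store, String row, String value) {
    KeyValue kv = new KeyValue(Bytes.toBytes(row), Bytes.toBytes("f"),
        Bytes.toBytes("q"), Bytes.toBytes(value));
    return store.add(kv); // memstore size delta
  }
}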
*/ - StoreFile.Writer createWriterInTmp( - long maxKeyCount, - Compression.Algorithm compression, - boolean isCompaction, - boolean includeMVCCReadpoint - ) throws IOException; + public StoreFile.Writer createWriterInTmp(long maxKeyCount, Compression.Algorithm compression, + boolean isCompaction, boolean includeMVCCReadpoint) throws IOException; // Compaction oriented methods - boolean throttleCompaction(long compactionSize); + public boolean throttleCompaction(long compactionSize); /** * getter for CompactionProgress object * @return CompactionProgress object; can be null */ - CompactionProgress getCompactionProgress(); + public CompactionProgress getCompactionProgress(); - CompactionContext requestCompaction() throws IOException; + public CompactionContext requestCompaction() throws IOException; - CompactionContext requestCompaction(int priority, CompactionRequest baseRequest) + public CompactionContext requestCompaction(int priority, CompactionRequest baseRequest) throws IOException; - void cancelRequestedCompaction(CompactionContext compaction); + public void cancelRequestedCompaction(CompactionContext compaction); - List<StoreFile> compact(CompactionContext compaction) throws IOException; + public List<StoreFile> compact(CompactionContext compaction) throws IOException; /** * @return true if we should run a major compaction. */ - boolean isMajorCompaction() throws IOException; + public boolean isMajorCompaction() throws IOException; - void triggerMajorCompaction(); + public void triggerMajorCompaction(); /** * See if there are too many store files in this store * @return true if number of store files is greater than the number defined in minFilesToCompact */ - boolean needsCompaction(); + public boolean needsCompaction(); - int getCompactPriority(); + public int getCompactPriority(); - StoreFlushContext createFlushContext(long cacheFlushId); + public StoreFlushContext createFlushContext(long cacheFlushId); /** * Call to complete a compaction. It's for the case where we find in the WAL a compaction * that was not finished. We could find one recovering a WAL after a regionserver crash. * See HBASE-2331. * @param compaction */ - void completeCompactionMarker(CompactionDescriptor compaction) + public void completeCompactionMarker(CompactionDescriptor compaction) throws IOException; // Split oriented methods - boolean canSplit(); + public boolean canSplit(); /** * Determines if Store should be split * @return byte[] if store should be split, null otherwise. */ - byte[] getSplitPoint(); + public byte[] getSplitPoint(); // Bulk Load methods /** * This throws a WrongRegionException if the HFile does not fit in this region, or an * InvalidHFileException if the HFile is not valid. */ - void assertBulkLoadHFileOk(Path srcPath) throws IOException; + public void assertBulkLoadHFileOk(Path srcPath) throws IOException; /** * This method should only be called from HRegion.
It is assumed that the ranges of values in the @@ -233,7 +225,7 @@ public interface Store extends HeapSize, StoreConfigInformation { * @param srcPathStr * @param sequenceId sequence Id associated with the HFile */ - void bulkLoadHFile(String srcPathStr, long sequenceId) throws IOException; + public void bulkLoadHFile(String srcPathStr, long sequenceId) throws IOException; // General accessors into the state of the store // TODO abstract some of this out into a metrics class @@ -241,50 +233,50 @@ public interface Store extends HeapSize, StoreConfigInformation { /** * @return true if the store has any underlying reference files to older HFiles */ - boolean hasReferences(); + public boolean hasReferences(); /** * @return The size of this store's memstore, in bytes */ - long getMemStoreSize(); + public long getMemStoreSize(); - HColumnDescriptor getFamily(); + public HColumnDescriptor getFamily(); /** * @return The maximum memstoreTS in all store files. */ - long getMaxMemstoreTS(); + public long getMaxMemstoreTS(); /** * @return the data block encoder */ - HFileDataBlockEncoder getDataBlockEncoder(); + public HFileDataBlockEncoder getDataBlockEncoder(); /** @return aggregate size of all HStores used in the last compaction */ - long getLastCompactSize(); + public long getLastCompactSize(); /** @return aggregate size of HStore */ - long getSize(); + public long getSize(); /** * @return Count of store files */ - int getStorefilesCount(); + public int getStorefilesCount(); /** * @return The size of the store files, in bytes, uncompressed. */ - long getStoreSizeUncompressed(); + public long getStoreSizeUncompressed(); /** * @return The size of the store files, in bytes. */ - long getStorefilesSize(); + public long getStorefilesSize(); /** * @return The size of the store file indexes, in bytes. */ - long getStorefilesIndexSize(); + public long getStorefilesIndexSize(); /** * Returns the total size of all index blocks in the data block indexes, including the root level, @@ -292,14 +284,14 @@ public interface Store extends HeapSize, StoreConfigInformation { * single-level indexes. * @return the total size of block indexes in the store */ - long getTotalStaticIndexSize(); + public long getTotalStaticIndexSize(); /** * Returns the total byte size of all Bloom filter bit arrays. For compound Bloom filters even the * Bloom blocks currently not loaded into the block cache are counted. * @return the total size of all Bloom filters in the store */ - long getTotalStaticBloomSize(); + public long getTotalStaticBloomSize(); // Test-helper methods @@ -307,40 +299,40 @@ public interface Store extends HeapSize, StoreConfigInformation { * Used for tests. * @return cache configuration for this Store. */ - CacheConfig getCacheConfig(); + public CacheConfig getCacheConfig(); /** * @return the parent region info hosting this store */ - HRegionInfo getRegionInfo(); + public HRegionInfo getRegionInfo(); - RegionCoprocessorHost getCoprocessorHost(); + public RegionCoprocessorHost getCoprocessorHost(); - boolean areWritesEnabled(); + public boolean areWritesEnabled(); /** * @return The smallest mvcc readPoint across all the scanners in this * region. Writes older than this readPoint, are included in every * read operation. 
*/ - long getSmallestReadPoint(); + public long getSmallestReadPoint(); - String getColumnFamilyName(); + public String getColumnFamilyName(); - String getTableName(); + public String getTableName(); /* * @param o Observer who wants to know about changes in set of Readers */ - void addChangedReaderObserver(ChangedReadersObserver o); + public void addChangedReaderObserver(ChangedReadersObserver o); /* * @param o Observer no longer interested in changes in set of Readers. */ - void deleteChangedReaderObserver(ChangedReadersObserver o); + public void deleteChangedReaderObserver(ChangedReadersObserver o); /** * @return Whether this store has too many store files. */ - boolean hasTooManyStoreFiles(); + public boolean hasTooManyStoreFiles(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreConfigInformation.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreConfigInformation.java index 62cef1b0b7f..a96f1424261 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreConfigInformation.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreConfigInformation.java @@ -34,22 +34,22 @@ public interface StoreConfigInformation { * TODO: remove after HBASE-7252 is fixed. * @return Gets the Memstore flush size for the region that this store works with. */ - long getMemstoreFlushSize(); + public long getMemstoreFlushSize(); /** * @return Gets the cf-specific time-to-live for store files. */ - long getStoreFileTtl(); + public long getStoreFileTtl(); /** * @return Gets the cf-specific compaction check frequency multiplier. * The need for compaction (outside of normal checks during flush, open, etc.) will * be ascertained every multiplier * HConstants.THREAD_WAKE_FREQUENCY milliseconds. */ - long getCompactionCheckMultiplier(); + public long getCompactionCheckMultiplier(); /** * The number of files required before flushes for this store will be blocked. */ - long getBlockingFileCount(); + public long getBlockingFileCount(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java index 54ca48cfa32..85faabaeaa5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java @@ -44,41 +44,40 @@ public interface StoreFileManager { * Loads the initial store files into empty StoreFileManager. * @param storeFiles The files to load. */ - void loadFiles(List<StoreFile> storeFiles); + public abstract void loadFiles(List<StoreFile> storeFiles); /** * Adds new file, either from MemStore flush or bulk insert, into the structure. * @param sf New store file. */ - void insertNewFile(StoreFile sf); + public abstract void insertNewFile(StoreFile sf); /** * Adds compaction results into the structure. * @param compactedFiles The input files for the compaction. * @param results The resulting files for the compaction. */ - void addCompactionResults( - Collection<StoreFile> compactedFiles, Collection<StoreFile> results - ); + public abstract void addCompactionResults( + Collection<StoreFile> compactedFiles, Collection<StoreFile> results); /** * Clears all the files currently in use and returns them. * @return The files previously in use. */ - ImmutableCollection<StoreFile> clearFiles(); + public abstract ImmutableCollection<StoreFile> clearFiles(); /** * Gets the snapshot of the store files currently in use.
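A sketch of how the read-only accessors restored in the Store hunks above might be pulled together, assuming an already-initialized Store; the output format is invented for this note:

import org.apache.hadoop.hbase.regionserver.Store;

public class StoreSummarySketch {
  // Formats a few of the metrics-style accessors into a single line.
  static String summarize(Store store) {
    return store.getColumnFamilyName()
        + ": files=" + store.getStorefilesCount()
        + " (" + store.getStorefilesSize() + " bytes)"
        + ", memstore=" + store.getMemStoreSize() + " bytes";
  }
}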
Can be used for things like metrics * and checks; should not assume anything about relations between store files in the list. * @return The list of StoreFiles. */ - Collection getStorefiles(); + public abstract Collection getStorefiles(); /** * Returns the number of files currently in use. * @return The number of files. */ - int getStorefileCount(); + public abstract int getStorefileCount(); /** * Gets the store files to scan for a Scan or Get request. @@ -87,9 +86,8 @@ public interface StoreFileManager { * @param stopRow Stop row of the request. * @return The list of files that are to be read for this request. */ - Collection getFilesForScanOrGet( - boolean isGet, byte[] startRow, byte[] stopRow - ); + public abstract Collection getFilesForScanOrGet(boolean isGet, + byte[] startRow, byte[] stopRow); /** * Gets initial, full list of candidate store files to check for row-key-before. @@ -97,9 +95,8 @@ public interface StoreFileManager { * @return The files that may have the key less than or equal to targetKey, in reverse * order of new-ness, and preference for target key. */ - Iterator getCandidateFilesForRowKeyBefore( - KeyValue targetKey - ); + public abstract Iterator getCandidateFilesForRowKeyBefore( + KeyValue targetKey); /** * Updates the candidate list for finding row key before. Based on the list of candidates @@ -112,9 +109,8 @@ public interface StoreFileManager { * @param candidate The current best candidate found. * @return The list to replace candidateFiles. */ - Iterator updateCandidateFilesForRowKeyBefore( - Iterator candidateFiles, KeyValue targetKey, KeyValue candidate - ); + public abstract Iterator updateCandidateFilesForRowKeyBefore( + Iterator candidateFiles, KeyValue targetKey, KeyValue candidate); /** @@ -122,10 +118,10 @@ public interface StoreFileManager { * @return The mid-point, or null if no split is possible. * @throws IOException */ - byte[] getSplitPoint() throws IOException; + public abstract byte[] getSplitPoint() throws IOException; /** * @return The store compaction priority. */ - int getStoreCompactionPriority(); + public abstract int getStoreCompactionPriority(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Dictionary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Dictionary.java index e99820ae19f..e1cfed195bb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Dictionary.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Dictionary.java @@ -28,7 +28,7 @@ import org.apache.hadoop.classification.InterfaceAudience; */ @InterfaceAudience.Private interface Dictionary { - byte NOT_IN_DICTIONARY = -1; + static final byte NOT_IN_DICTIONARY = -1; /** * Gets an entry from the dictionary. @@ -36,7 +36,7 @@ interface Dictionary { * @param idx index of the entry * @return the entry, or null if non existent */ - byte[] getEntry(short idx); + public byte[] getEntry(short idx); /** * Finds the index of an entry. @@ -47,7 +47,7 @@ interface Dictionary { * @param length Length beyond offset that comprises entry; must be > 0. * @return the index of the entry, or {@link #NOT_IN_DICTIONARY} if not found */ - short findEntry(byte[] data, int offset, int length); + public short findEntry(byte[] data, int offset, int length); /** * Adds an entry to the dictionary. 
@@ -62,10 +62,10 @@ interface Dictionary { * @return the index of the entry */ - short addEntry(byte[] data, int offset, int length); + public short addEntry(byte[] data, int offset, int length); /** * Flushes the dictionary, empties all values. */ - void clear(); + public void clear(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java index 62643f358de..97413b31cfb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java @@ -41,26 +41,26 @@ import org.apache.hadoop.io.Writable; @InterfaceAudience.Private public interface HLog { - Log LOG = LogFactory.getLog(HLog.class); + public static final Log LOG = LogFactory.getLog(HLog.class); /** File Extension used while splitting an HLog into regions (HBASE-2312) */ - String SPLITTING_EXT = "-splitting"; - boolean SPLIT_SKIP_ERRORS_DEFAULT = false; + public static final String SPLITTING_EXT = "-splitting"; + public static final boolean SPLIT_SKIP_ERRORS_DEFAULT = false; /** The META region's HLog filename extension */ - String META_HLOG_FILE_EXTN = ".meta"; + public static final String META_HLOG_FILE_EXTN = ".meta"; /** * Configuration name of HLog Trailer's warning size. If a waltrailer's size is greater than the * configured size, a warning is logged. This is used with Protobuf reader/writer. */ - String WAL_TRAILER_WARN_SIZE = + public static final String WAL_TRAILER_WARN_SIZE = "hbase.regionserver.waltrailer.warn.size"; - int DEFAULT_WAL_TRAILER_WARN_SIZE = 1024*1024; // 1MB + public static final int DEFAULT_WAL_TRAILER_WARN_SIZE = 1024*1024; // 1MB - Pattern EDITFILES_NAME_PATTERN = Pattern.compile("-?[0-9]+"); - String RECOVERED_LOG_TMPFILE_SUFFIX = ".temp"; + static final Pattern EDITFILES_NAME_PATTERN = Pattern.compile("-?[0-9]+"); + public static final String RECOVERED_LOG_TMPFILE_SUFFIX = ".temp"; - interface Reader { + public interface Reader { /** * @param fs File system. @@ -88,7 +88,7 @@ public interface HLog { WALTrailer getWALTrailer(); } - interface Writer { + public interface Writer { void init(FileSystem fs, Path path, Configuration c) throws IOException; void close() throws IOException; @@ -110,7 +110,7 @@ public interface HLog { * Utility class that lets us keep track of the edit with it's key Only used * when splitting logs */ - class Entry implements Writable { + public static class Entry implements Writable { private WALEdit edit; private HLogKey key; @@ -185,19 +185,19 @@ public interface HLog { * * @param listener */ - void registerWALActionsListener(final WALActionsListener listener); + public void registerWALActionsListener(final WALActionsListener listener); /** * unregisters WALActionsListener * * @param listener */ - boolean unregisterWALActionsListener(final WALActionsListener listener); + public boolean unregisterWALActionsListener(final WALActionsListener listener); /** * @return Current state of the monotonically increasing file id. */ - long getFilenum(); + public long getFilenum(); /** * Called by HRegionServer when it opens a new region to ensure that log @@ -208,12 +208,12 @@ public interface HLog { * We'll set log edit/sequence number to this value if it is greater * than the current value. 
*/ - void setSequenceNumber(final long newvalue); + public void setSequenceNumber(final long newvalue); /** * @return log sequence number */ - long getSequenceNumber(); + public long getSequenceNumber(); /** * Roll the log writer. That is, start writing log messages to a new file. @@ -228,7 +228,7 @@ public interface HLog { * @throws org.apache.hadoop.hbase.exceptions.FailedLogCloseException * @throws IOException */ - byte[][] rollWriter() throws FailedLogCloseException, IOException; + public byte[][] rollWriter() throws FailedLogCloseException, IOException; /** * Roll the log writer. That is, start writing log messages to a new file. @@ -246,7 +246,7 @@ public interface HLog { * @throws org.apache.hadoop.hbase.exceptions.FailedLogCloseException * @throws IOException */ - byte[][] rollWriter(boolean force) throws FailedLogCloseException, + public byte[][] rollWriter(boolean force) throws FailedLogCloseException, IOException; /** @@ -254,22 +254,21 @@ public interface HLog { * * @throws IOException */ - void close() throws IOException; + public void close() throws IOException; /** * Shut down the log and delete the log directory * * @throws IOException */ - void closeAndDelete() throws IOException; + public void closeAndDelete() throws IOException; /** * Same as {@link #appendNoSync(HRegionInfo, byte[], WALEdit, UUID, long, HTableDescriptor)}, * except it causes a sync on the log */ - void append( - HRegionInfo info, byte[] tableName, WALEdit edits, final long now, HTableDescriptor htd - ) throws IOException; + public void append(HRegionInfo info, byte[] tableName, WALEdit edits, + final long now, HTableDescriptor htd) throws IOException; /** * Append a set of edits to the log. Log edits are keyed by (encoded) @@ -282,14 +281,8 @@ public interface HLog { * @param htd * @param isInMemstore Whether the record is in memstore. False for system records. */ - void append( - HRegionInfo info, - byte[] tableName, - WALEdit edits, - final long now, - HTableDescriptor htd, - boolean isInMemstore - ) throws IOException; + public void append(HRegionInfo info, byte[] tableName, WALEdit edits, + final long now, HTableDescriptor htd, boolean isInMemstore) throws IOException; /** * Append a set of edits to the log. Log edits are keyed by (encoded) @@ -306,27 +299,21 @@ public interface HLog { * @return txid of this transaction * @throws IOException */ - long appendNoSync( - HRegionInfo info, - byte[] tableName, - WALEdit edits, - UUID clusterId, - final long now, - HTableDescriptor htd - ) throws IOException; + public long appendNoSync(HRegionInfo info, byte[] tableName, WALEdit edits, + UUID clusterId, final long now, HTableDescriptor htd) throws IOException; - void hsync() throws IOException; + public void hsync() throws IOException; - void hflush() throws IOException; + public void hflush() throws IOException; - void sync() throws IOException; + public void sync() throws IOException; - void sync(long txid) throws IOException; + public void sync(long txid) throws IOException; /** * Obtain a log sequence number. */ - long obtainSeqNum(); + public long obtainSeqNum(); /** * WAL keeps track of the sequence numbers that were not yet flushed from memstores @@ -343,13 +330,13 @@ public interface HLog { * the resulting file as an upper-bound seqNum for that file), or NULL if flush * should not be started. */ - Long startCacheFlush(final byte[] encodedRegionName); + public Long startCacheFlush(final byte[] encodedRegionName); /** * Complete the cache flush. * @param encodedRegionName Encoded region name. 
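A sketch of the append-then-sync flow the HLog javadoc above describes (appendNoSync returns a txid; sync(txid) blocks on it). Every argument is assumed to come from an open region; nothing here is part of the patch:

import java.io.IOException;
import java.util.UUID;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;

public class WalAppendSketch {
  // Append without an implicit sync, then block until the edit is durable.
  static void logEdit(HLog wal, HRegionInfo info, byte[] tableName,
      WALEdit edits, UUID clusterId, HTableDescriptor htd) throws IOException {
    long txid = wal.appendNoSync(info, tableName, edits, clusterId,
        System.currentTimeMillis(), htd);
    wal.sync(txid);
  }
}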
*/ - void completeCacheFlush(final byte[] encodedRegionName); + public void completeCacheFlush(final byte[] encodedRegionName); /** * Abort a cache flush. Call if the flush fails. Note that the only recovery @@ -357,24 +344,24 @@ public interface HLog { * snapshot content dropped by the failure gets restored to the memstore.v * @param encodedRegionName Encoded region name. */ - void abortCacheFlush(byte[] encodedRegionName); + public void abortCacheFlush(byte[] encodedRegionName); /** * @return Coprocessor host. */ - WALCoprocessorHost getCoprocessorHost(); + public WALCoprocessorHost getCoprocessorHost(); /** * Get LowReplication-Roller status * * @return lowReplicationRollEnabled */ - boolean isLowReplicationRollEnabled(); + public boolean isLowReplicationRollEnabled(); /** Gets the earliest sequence number in the memstore for this particular region. * This can serve as best-effort "recent" WAL number for this region. * @param encodedRegionName The region to get the number for. * @return The number if present, HConstants.NO_SEQNUM if absent. */ - long getEarliestMemstoreSeqNum(byte[] encodedRegionName); + public long getEarliestMemstoreSeqNum(byte[] encodedRegionName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java index 25c815b4050..354f2e21196 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java @@ -38,7 +38,7 @@ public interface WALActionsListener { * @param oldPath the path to the old hlog * @param newPath the path to the new hlog */ - void preLogRoll(Path oldPath, Path newPath) throws IOException; + public void preLogRoll(Path oldPath, Path newPath) throws IOException; /** * The WAL has been rolled. The oldPath can be null if this is @@ -46,31 +46,31 @@ public interface WALActionsListener { * @param oldPath the path to the old hlog * @param newPath the path to the new hlog */ - void postLogRoll(Path oldPath, Path newPath) throws IOException; + public void postLogRoll(Path oldPath, Path newPath) throws IOException; /** * The WAL is going to be archived. * @param oldPath the path to the old hlog * @param newPath the path to the new hlog */ - void preLogArchive(Path oldPath, Path newPath) throws IOException; + public void preLogArchive(Path oldPath, Path newPath) throws IOException; /** * The WAL has been archived. * @param oldPath the path to the old hlog * @param newPath the path to the new hlog */ - void postLogArchive(Path oldPath, Path newPath) throws IOException; + public void postLogArchive(Path oldPath, Path newPath) throws IOException; /** * A request was made that the WAL be rolled. */ - void logRollRequested(); + public void logRollRequested(); /** * The WAL is about to close. */ - void logCloseRequested(); + public void logCloseRequested(); /** * Called before each write. 
@@ -78,9 +78,8 @@ public interface WALActionsListener { * @param logKey * @param logEdit */ - void visitLogEntryBeforeWrite( - HRegionInfo info, HLogKey logKey, WALEdit logEdit - ); + public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey, + WALEdit logEdit); /** * @@ -88,8 +87,7 @@ public interface WALActionsListener { * @param logKey * @param logEdit */ - void visitLogEntryBeforeWrite( - HTableDescriptor htd, HLogKey logKey, WALEdit logEdit - ); + public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey, + WALEdit logEdit); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java index 8a5c08f10fa..05a586e5594 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java @@ -41,63 +41,61 @@ public interface ReplicationSourceInterface { * @param peerClusterId the id of the peer cluster * @throws IOException */ - void init( - final Configuration conf, - final FileSystem fs, - final ReplicationSourceManager manager, - final Stoppable stopper, - final String peerClusterId - ) throws IOException; + public void init(final Configuration conf, + final FileSystem fs, + final ReplicationSourceManager manager, + final Stoppable stopper, + final String peerClusterId) throws IOException; /** * Add a log to the list of logs to replicate * @param log path to the log to replicate */ - void enqueueLog(Path log); + public void enqueueLog(Path log); /** * Get the current log that's replicated * @return the current log */ - Path getCurrentPath(); + public Path getCurrentPath(); /** * Start the replication */ - void startup(); + public void startup(); /** * End the replication * @param reason why it's terminating */ - void terminate(String reason); + public void terminate(String reason); /** * End the replication * @param reason why it's terminating * @param cause the error that's causing it */ - void terminate(String reason, Exception cause); + public void terminate(String reason, Exception cause); /** * Get the id that the source is replicating to * * @return peer cluster id */ - String getPeerClusterZnode(); + public String getPeerClusterZnode(); /** * Get the id that the source is replicating to. 
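Because WALActionsListener (above) is a plain pre-Java-8 interface with no default methods, every implementor must spell out all eight callbacks. A no-op skeleton, editorial rather than taken from the patch:

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;

public class NoOpWalListener implements WALActionsListener {
  public void preLogRoll(Path oldPath, Path newPath) throws IOException {}
  public void postLogRoll(Path oldPath, Path newPath) throws IOException {}
  public void preLogArchive(Path oldPath, Path newPath) throws IOException {}
  public void postLogArchive(Path oldPath, Path newPath) throws IOException {}
  public void logRollRequested() {}
  public void logCloseRequested() {}
  public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey, WALEdit logEdit) {}
  public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey, WALEdit logEdit) {}
}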
* * @return peer cluster id */ - String getPeerClusterId(); + public String getPeerClusterId(); /** * Get a string representation of the current statistics * for this source * @return printable stats */ - String getStats(); + public String getStats(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/Constants.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/Constants.java index cb337ffa12f..43ff5fc1503 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/Constants.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/Constants.java @@ -28,19 +28,19 @@ import org.apache.hadoop.classification.InterfaceStability; @InterfaceAudience.Public @InterfaceStability.Stable public interface Constants { - String VERSION_STRING = "0.0.2"; + public static final String VERSION_STRING = "0.0.2"; - int DEFAULT_MAX_AGE = 60 * 60 * 4; // 4 hours + public static final int DEFAULT_MAX_AGE = 60 * 60 * 4; // 4 hours - int DEFAULT_LISTEN_PORT = 8080; + public static final int DEFAULT_LISTEN_PORT = 8080; - String MIMETYPE_TEXT = "text/plain"; - String MIMETYPE_HTML = "text/html"; - String MIMETYPE_XML = "text/xml"; - String MIMETYPE_BINARY = "application/octet-stream"; - String MIMETYPE_PROTOBUF = "application/x-protobuf"; - String MIMETYPE_PROTOBUF_IETF = "application/protobuf"; - String MIMETYPE_JSON = "application/json"; + public static final String MIMETYPE_TEXT = "text/plain"; + public static final String MIMETYPE_HTML = "text/html"; + public static final String MIMETYPE_XML = "text/xml"; + public static final String MIMETYPE_BINARY = "application/octet-stream"; + public static final String MIMETYPE_PROTOBUF = "application/x-protobuf"; + public static final String MIMETYPE_PROTOBUF_IETF = "application/protobuf"; + public static final String MIMETYPE_JSON = "application/json"; - String CRLF = "\r\n"; + public static final String CRLF = "\r\n"; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java index 4f7d9b6e225..50ceb9330f4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java @@ -29,11 +29,11 @@ import org.apache.hadoop.classification.InterfaceAudience; * ProtobufMessageBodyProducer adapters. */ @InterfaceAudience.Private -public interface ProtobufMessageHandler { +public abstract interface ProtobufMessageHandler { /** * @return the protobuf representation of the model */ - byte[] createProtobufOutput(); + public byte[] createProtobufOutput(); /** * Initialize the model from a protobuf representation.
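A client-side sketch of the REST constants above; HttpURLConnection is standard JDK, and the host and endpoint are invented for this note:

import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.hadoop.hbase.rest.Constants;

public class RestMimeSketch {
  public static void main(String[] args) throws Exception {
    // Ask a (hypothetical) local REST gateway for a protobuf response.
    URL url = new URL("http://localhost:" + Constants.DEFAULT_LISTEN_PORT + "/version");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", Constants.MIMETYPE_PROTOBUF);
    System.out.println("HTTP " + conn.getResponseCode());
  }
}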
@@ -41,6 +41,6 @@ public interface ProtobufMessageHandler { * @return reference to self for convenience * @throws IOException */ - ProtobufMessageHandler getObjectFromMessage(byte[] message) + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CodeToClassAndBackFor96Migration.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CodeToClassAndBackFor96Migration.java index acab5886b30..ba7328213e9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CodeToClassAndBackFor96Migration.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CodeToClassAndBackFor96Migration.java @@ -39,13 +39,13 @@ interface CodeToClassAndBackFor96Migration { /** * Static map that contains mapping from code to class */ - Map<Byte, Class<?>> CODE_TO_CLASS = + static final Map<Byte, Class<?>> CODE_TO_CLASS = new HashMap<Byte, Class<?>>(); /** * Static map that contains mapping from class to code */ - Map<Class<?>, Byte> CLASS_TO_CODE = + static final Map<Class<?>, Byte> CLASS_TO_CODE = new HashMap<Class<?>, Byte>(); /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescerMBean.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescerMBean.java index 604fa97e3a4..0e32951b758 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescerMBean.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescerMBean.java @@ -19,31 +19,31 @@ package org.apache.hadoop.hbase.thrift; public interface IncrementCoalescerMBean { - int getQueueSize(); + public int getQueueSize(); - int getMaxQueueSize(); + public int getMaxQueueSize(); - void setMaxQueueSize(int newSize); + public void setMaxQueueSize(int newSize); - long getPoolCompletedTaskCount(); + public long getPoolCompletedTaskCount(); - long getPoolTaskCount(); + public long getPoolTaskCount(); - int getPoolLargestPoolSize(); + public int getPoolLargestPoolSize(); - int getCorePoolSize(); + public int getCorePoolSize(); - void setCorePoolSize(int newCoreSize); + public void setCorePoolSize(int newCoreSize); - int getMaxPoolSize(); + public int getMaxPoolSize(); - void setMaxPoolSize(int newMaxSize); + public void setMaxPoolSize(int newMaxSize); - long getFailedIncrements(); + public long getFailedIncrements(); - long getSuccessfulCoalescings(); + public long getSuccessfulCoalescings(); - long getTotalIncrements(); + public long getTotalIncrements(); - long getCountersMapSize(); + public long getCountersMapSize(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java index 4f37aa23d58..a3c88c62921 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java @@ -47,9 +47,9 @@ import org.apache.hadoop.hbase.client.HBaseAdmin; public final class Canary implements Tool { // Sink interface used by the canary to output information public interface Sink { - void publishReadFailure(HRegionInfo region); - void publishReadFailure(HRegionInfo region, HColumnDescriptor column); - void publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime); + public void publishReadFailure(HRegionInfo region); + public void publishReadFailure(HRegionInfo region, HColumnDescriptor column); + public void publishReadTiming(HRegionInfo region, HColumnDescriptor column,
long msTime); } // Simple implementation of canary sink that allows to plot on diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CancelableProgressable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CancelableProgressable.java index 8b884cf7423..2ee347c527b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CancelableProgressable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CancelableProgressable.java @@ -34,6 +34,6 @@ public interface CancelableProgressable { * operation should be canceled and rolled back. * @return whether to continue (true) or cancel (false) the operation */ - boolean progress(); + public boolean progress(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 68dc969e619..920b66d5029 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -2903,7 +2903,7 @@ public class HBaseFsck extends Configured implements Tool { } public interface ErrorReporter { - enum ERROR_CODE { + public static enum ERROR_CODE { UNKNOWN, NO_META_REGION, NULL_META_REGION, NO_VERSION_FILE, NOT_IN_META_HDFS, NOT_IN_META, NOT_IN_META_OR_DEPLOYED, NOT_IN_HDFS_OR_DEPLOYED, NOT_IN_HDFS, SERVER_DOES_NOT_MATCH_META, NOT_DEPLOYED, MULTI_DEPLOYED, SHOULD_NOT_BE_DEPLOYED, MULTI_META_REGION, RS_CONNECT_FAILURE, @@ -2912,26 +2912,20 @@ public class HBaseFsck extends Configured implements Tool { ORPHAN_HDFS_REGION, LINGERING_SPLIT_PARENT, NO_TABLEINFO_FILE, LINGERING_REFERENCE_HFILE, WRONG_USAGE, EMPTY_META_CELL, EXPIRED_TABLE_LOCK } - void clear(); - void report(String message); - void reportError(String message); - void reportError(ERROR_CODE errorCode, String message); - void reportError(ERROR_CODE errorCode, String message, TableInfo table); - void reportError(ERROR_CODE errorCode, String message, TableInfo table, HbckInfo info); - void reportError( - ERROR_CODE errorCode, - String message, - TableInfo table, - HbckInfo info1, - HbckInfo info2 - ); - int summarize(); - void detail(String details); - ArrayList getErrorList(); - void progress(); - void print(String message); - void resetErrors(); - boolean tableHasErrors(TableInfo table); + public void clear(); + public void report(String message); + public void reportError(String message); + public void reportError(ERROR_CODE errorCode, String message); + public void reportError(ERROR_CODE errorCode, String message, TableInfo table); + public void reportError(ERROR_CODE errorCode, String message, TableInfo table, HbckInfo info); + public void reportError(ERROR_CODE errorCode, String message, TableInfo table, HbckInfo info1, HbckInfo info2); + public int summarize(); + public void detail(String details); + public ArrayList getErrorList(); + public void progress(); + public void print(String message); + public void resetErrors(); + public boolean tableHasErrors(TableInfo table); } static class PrintingErrorReporter implements ErrorReporter { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/KeyRange.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/KeyRange.java index ff4023b74bd..8ae0a5e435d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/KeyRange.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/KeyRange.java @@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience; */ 
@InterfaceAudience.Private public interface KeyRange { - byte[] getStartKey(); + abstract byte[] getStartKey(); - byte[] getEndKey(); + abstract byte[] getEndKey(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java index 6beed929156..932c3cd2f78 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java @@ -60,7 +60,7 @@ public abstract class ModifyRegionUtils { } public interface RegionFillTask { - void fillRegion(final HRegion region) throws IOException; + public void fillRegion(final HRegion region) throws IOException; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java index 6df4790bc03..f811ab79704 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java @@ -147,7 +147,7 @@ public class RegionSplitter { * {@link RegionSplitter#rollingSplit(String, SplitAlgorithm, Configuration)} with the * argument splitClassName giving the name of your class. */ - public interface SplitAlgorithm { + public static interface SplitAlgorithm { /** * Split a pre-existing region into 2 regions. * @@ -1046,4 +1046,4 @@ public class RegionSplitter { + "," + rowToStr(lastRow()) + "]"; } } -} +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java index c69c97c8ee9..ca6e7f7989a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java @@ -382,11 +382,11 @@ public abstract class HBaseTestCase extends TestCase { /** * Implementors can flushcache. */ - public interface FlushCache { + public static interface FlushCache { /** * @throws IOException */ - void flushcache() throws IOException; + public void flushcache() throws IOException; } /** @@ -395,23 +395,23 @@ public abstract class HBaseTestCase extends TestCase { * * TOOD: Come up w/ a better name for this interface. 
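RegionFillTask (above) is a single-method callback that callers in this pre-Java-8 codebase typically supply as an anonymous class. A sketch, with the row and column values invented for illustration:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;

public class FillTaskSketch {
  // Seeds each newly created region with a single marker row.
  static final ModifyRegionUtils.RegionFillTask SEED_ROW = new ModifyRegionUtils.RegionFillTask() {
    public void fillRegion(final HRegion region) throws IOException {
      Put put = new Put(Bytes.toBytes("seed-row"));
      put.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      region.put(put);
    }
  };
}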
*/ - public interface Incommon { + public static interface Incommon { /** * * @param delete * @param writeToWAL * @throws IOException */ - void delete(Delete delete, boolean writeToWAL) + public void delete(Delete delete, boolean writeToWAL) throws IOException; /** * @param put * @throws IOException */ - void put(Put put) throws IOException; + public void put(Put put) throws IOException; - Result get(Get get) throws IOException; + public Result get(Get get) throws IOException; /** * @param family @@ -421,9 +421,8 @@ public abstract class HBaseTestCase extends TestCase { * @return scanner for specified columns, first row and timestamp * @throws IOException */ - ScannerIncommon getScanner( - byte[] family, byte[][] qualifiers, byte[] firstRow, long ts - ) + public ScannerIncommon getScanner(byte [] family, byte [][] qualifiers, + byte [] firstRow, long ts) throws IOException; } @@ -521,10 +520,10 @@ public abstract class HBaseTestCase extends TestCase { public interface ScannerIncommon extends Iterable { - boolean next(List values) + public boolean next(List values) throws IOException; - void close() throws IOException; + public void close() throws IOException; } public static class ClientScannerIncommon implements ScannerIncommon { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java index 8a453d45d57..15710da2c99 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java @@ -198,7 +198,7 @@ public class PerformanceEvaluation extends Configured implements Tool { /** * Implementations can have their status set. */ - interface Status { + static interface Status { /** * Sets status * @param msg status message diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomDistribution.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomDistribution.java index 49f57de6e29..7232cada690 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomDistribution.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomDistribution.java @@ -33,13 +33,13 @@ public class RandomDistribution { /** * Interface for discrete (integer) random distributions. */ - public interface DiscreteRNG { + public static interface DiscreteRNG { /** * Get the next random number * * @return the next random number. 
*/ - int nextInt(); + public int nextInt(); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java index 152667a7bdc..2345a0c3103 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java @@ -140,7 +140,7 @@ public class TestSplitLogManager { } private interface Expr { - long eval(); + public long eval(); } private void waitForCounter(final AtomicLong ctr, long oldval, long newval, long timems) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java index 4d27769af2b..f41a41c2753 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java @@ -182,7 +182,7 @@ public class PerformanceEvaluation { /** * Implementations can have their status set. */ - interface Status { + static interface Status { /** * Sets status * @param msg status message diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java index 827fcf8002f..c1364793f03 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java @@ -92,7 +92,7 @@ public class TestTokenAuthentication { } private static Log LOG = LogFactory.getLog(TestTokenAuthentication.class); - public interface AuthenticationServiceSecurityInfo {} + public static interface AuthenticationServiceSecurityInfo {} /** * Basic server process for RPC authentication testing
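Closing aside: the restored "static" keywords on nested types in this patch (TaskExecutor, ERROR_CODE, AuthenticationServiceSecurityInfo, and others) are likewise implicit; member interfaces and member enums are always static in Java. An illustration with invented names:

// Illustration only: each pair below is equivalent, since member
// interfaces and member enums are implicitly static.
public class Outer {
  interface Callback { void done(); }
  static interface ExplicitCallback { void done(); }  // "static" is redundant
  enum Status { DONE, ERR }
  static enum ExplicitStatus { DONE, ERR }            // likewise redundant
}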