HBASE-8918 Removes redundant identifiers from interfaces; REVERT -- PREMATURE APPLICATION
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1501909 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent da3e3046ad
commit e79eefe78b
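Background for the change being reverted: in Java, interface methods are implicitly public and abstract, interface fields are implicitly public static final, and types nested in an interface are implicitly public and static, so spelling those modifiers out is redundant. The original HBASE-8918 patch removed them throughout the files below; per the subject line the patch was applied prematurely, so this revert restores the modifiers. A minimal sketch of the rule (hypothetical example, not taken from the HBase codebase):

    // Both members of each pair are equivalent; javac emits identical
    // bytecode whether or not the redundant modifiers are written.
    public interface Example {
      static final int REDUNDANT = 1;   // implicitly public static final
      int CONCISE = 2;                  // same visibility and finality

      public void redundant();          // implicitly public abstract
      void concise();                   // same contract
    }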
@@ -35,11 +35,11 @@ public interface Abortable {
   * @param why Why we're aborting.
   * @param e Throwable that caused abort. Can be null.
   */
-  void abort(String why, Throwable e);
+  public void abort(String why, Throwable e);

  /**
   * Check if the server or client was aborted.
   * @return true if the server or client was aborted, false otherwise
   */
-  boolean isAborted();
+  public boolean isAborted();
}
@@ -26,21 +26,21 @@ import java.io.IOException;
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface Coprocessor {
-  int VERSION = 1;
+  static final int VERSION = 1;

  /** Highest installation priority */
-  int PRIORITY_HIGHEST = 0;
+  static final int PRIORITY_HIGHEST = 0;
  /** High (system) installation priority */
-  int PRIORITY_SYSTEM = Integer.MAX_VALUE / 4;
+  static final int PRIORITY_SYSTEM = Integer.MAX_VALUE / 4;
  /** Default installation priority for user coprocessors */
-  int PRIORITY_USER = Integer.MAX_VALUE / 2;
+  static final int PRIORITY_USER = Integer.MAX_VALUE / 2;
  /** Lowest installation priority */
-  int PRIORITY_LOWEST = Integer.MAX_VALUE;
+  static final int PRIORITY_LOWEST = Integer.MAX_VALUE;

  /**
   * Lifecycle state of a given coprocessor instance.
   */
-  enum State {
+  public enum State {
    UNINSTALLED,
    INSTALLED,
    STARTING,
@@ -30,26 +30,26 @@ import java.io.IOException;
public interface CoprocessorEnvironment {

  /** @return the Coprocessor interface version */
-  int getVersion();
+  public int getVersion();

  /** @return the HBase version as a string (e.g. "0.21.0") */
-  String getHBaseVersion();
+  public String getHBaseVersion();

  /** @return the loaded coprocessor instance */
-  Coprocessor getInstance();
+  public Coprocessor getInstance();

  /** @return the priority assigned to the loaded coprocessor */
-  int getPriority();
+  public int getPriority();

  /** @return the load sequence number */
-  int getLoadSequence();
+  public int getLoadSequence();

  /** @return the configuration */
-  Configuration getConfiguration();
+  public Configuration getConfiguration();

  /**
   * @return an interface for accessing the given table
   * @throws IOException
   */
-  HTableInterface getTable(byte[] tableName) throws IOException;
+  public HTableInterface getTable(byte[] tableName) throws IOException;
}
@@ -32,20 +32,20 @@ public interface Server extends Abortable, Stoppable {
  /**
   * Gets the configuration object for this server.
   */
-  Configuration getConfiguration();
+  public Configuration getConfiguration();

  /**
   * Gets the ZooKeeper instance for this server.
   */
-  ZooKeeperWatcher getZooKeeper();
+  public ZooKeeperWatcher getZooKeeper();

  /**
   * @return Master's instance of {@link CatalogTracker}
   */
-  CatalogTracker getCatalogTracker();
+  public CatalogTracker getCatalogTracker();

  /**
   * @return The unique server name for this server.
   */
-  ServerName getServerName();
+  public ServerName getServerName();
}
@@ -29,10 +29,10 @@ public interface Stoppable {
   * Stop this service.
   * @param why Why we're stopping.
   */
-  void stop(String why);
+  public void stop(String why);

  /**
   * @return True if {@link #stop(String)} has been closed.
   */
-  boolean isStopped();
+  public boolean isStopped();
}
@@ -558,7 +558,7 @@ public class MetaReader {
     * @return True if we are to proceed scanning the table, else false if
     * we are to stop now.
     */
-    boolean visit(final Result r) throws IOException;
+    public boolean visit(final Result r) throws IOException;
  }

  /**
@@ -117,7 +117,7 @@ class AsyncProcess<CResult> {
   * </li>
   * </list>
   */
-  interface AsyncProcessCallback<CResult> {
+  static interface AsyncProcessCallback<CResult> {

    /**
     * Called on success. originalIndex holds the index in the action list.
@@ -34,18 +34,18 @@ public interface Attributes {
   * @param name attribute name
   * @param value attribute value
   */
-  void setAttribute(String name, byte[] value);
+  public void setAttribute(String name, byte[] value);

  /**
   * Gets an attribute
   * @param name attribute name
   * @return attribute value if attribute is set, <tt>null</tt> otherwise
   */
-  byte[] getAttribute(String name);
+  public byte[] getAttribute(String name);

  /**
   * Gets all attributes
   * @return unmodifiable map of all attributes
   */
-  Map<String, byte[]> getAttributesMap();
+  public Map<String, byte[]> getAttributesMap();
}
@@ -82,19 +82,19 @@ class ClusterStatusListener implements Closeable {
     *
     * @param sn - the server name
     */
-    void newDead(ServerName sn);
+    public void newDead(ServerName sn);
  }


  /**
   * The interface to be implented by a listener of a cluster status event.
   */
-  interface Listener extends Closeable {
+  static interface Listener extends Closeable {
    /**
     * Called to close the resources, if any. Cannot throw an exception.
     */
    @Override
-    void close();
+    public void close();

    /**
     * Called to connect.
@@ -102,7 +102,7 @@ class ClusterStatusListener implements Closeable {
     * @param conf Configuration to use.
     * @throws IOException
     */
-    void connect(Configuration conf) throws IOException;
+    public void connect(Configuration conf) throws IOException;
  }

  public ClusterStatusListener(DeadServerHandler dsh, Configuration conf,
@@ -62,10 +62,10 @@ public interface HConnection extends Abortable, Closeable {
  /**
   * @return Configuration instance being used by this HConnection instance.
   */
-  Configuration getConfiguration();
+  public Configuration getConfiguration();

  /** @return - true if the master server is running */
-  boolean isMasterRunning()
+  public boolean isMasterRunning()
    throws MasterNotRunningException, ZooKeeperConnectionException;

  /**
@@ -76,21 +76,21 @@ public interface HConnection extends Abortable, Closeable {
   * @return true if the table is enabled, false otherwise
   * @throws IOException if a remote or network exception occurs
   */
-  boolean isTableEnabled(byte[] tableName) throws IOException;
+  public boolean isTableEnabled(byte[] tableName) throws IOException;

  /**
   * @param tableName table name
   * @return true if the table is disabled, false otherwise
   * @throws IOException if a remote or network exception occurs
   */
-  boolean isTableDisabled(byte[] tableName) throws IOException;
+  public boolean isTableDisabled(byte[] tableName) throws IOException;

  /**
   * @param tableName table name
   * @return true if all regions of the table are available, false otherwise
   * @throws IOException if a remote or network exception occurs
   */
-  boolean isTableAvailable(byte[] tableName) throws IOException;
+  public boolean isTableAvailable(byte[] tableName) throws IOException;

  /**
   * Use this api to check if the table has been created with the specified number of
@@ -104,7 +104,7 @@ public interface HConnection extends Abortable, Closeable {
   * @throws IOException
   * if a remote or network exception occurs
   */
-  boolean isTableAvailable(byte[] tableName, byte[][] splitKeys) throws IOException;
+  public boolean isTableAvailable(byte[] tableName, byte[][] splitKeys) throws IOException;

  /**
   * List all the userspace tables. In other words, scan the META table.
@@ -116,14 +116,14 @@ public interface HConnection extends Abortable, Closeable {
   * @return - returns an array of HTableDescriptors
   * @throws IOException if a remote or network exception occurs
   */
-  HTableDescriptor[] listTables() throws IOException;
+  public HTableDescriptor[] listTables() throws IOException;

  /**
   * @param tableName table name
   * @return table metadata
   * @throws IOException if a remote or network exception occurs
   */
-  HTableDescriptor getHTableDescriptor(byte[] tableName)
+  public HTableDescriptor getHTableDescriptor(byte[] tableName)
    throws IOException;

  /**
@@ -135,15 +135,14 @@ public interface HConnection extends Abortable, Closeable {
   * question
   * @throws IOException if a remote or network exception occurs
   */
-  HRegionLocation locateRegion(
-    final byte[] tableName, final byte[] row
-  )
+  public HRegionLocation locateRegion(final byte [] tableName,
+      final byte [] row)
    throws IOException;

  /**
   * Allows flushing the region cache.
   */
-  void clearRegionCache();
+  public void clearRegionCache();

  /**
   * Allows flushing the region cache of all locations that pertain to
@@ -151,13 +150,13 @@ public interface HConnection extends Abortable, Closeable {
   * @param tableName Name of the table whose regions we are to remove from
   * cache.
   */
-  void clearRegionCache(final byte[] tableName);
+  public void clearRegionCache(final byte [] tableName);

  /**
   * Deletes cached locations for the specific region.
   * @param location The location object for the region, to be purged from cache.
   */
-  void deleteCachedRegionLocation(final HRegionLocation location);
+  public void deleteCachedRegionLocation(final HRegionLocation location);

  /**
   * Find the location of the region of <i>tableName</i> that <i>row</i>
@@ -168,9 +167,8 @@ public interface HConnection extends Abortable, Closeable {
   * question
   * @throws IOException if a remote or network exception occurs
   */
-  HRegionLocation relocateRegion(
-    final byte[] tableName, final byte[] row
-  )
+  public HRegionLocation relocateRegion(final byte [] tableName,
+      final byte [] row)
    throws IOException;

  /**
@@ -181,9 +179,8 @@ public interface HConnection extends Abortable, Closeable {
   * @param exception the exception if any. Can be null.
   * @param source the previous location
   */
-  void updateCachedLocations(
-    byte[] tableName, byte[] rowkey, Object exception, HRegionLocation source
-  );
+  public void updateCachedLocations(byte[] tableName, byte[] rowkey,
+      Object exception, HRegionLocation source);

  /**
   * Gets the location of the region of <i>regionName</i>.
@@ -192,7 +189,7 @@ public interface HConnection extends Abortable, Closeable {
   * question
   * @throws IOException if a remote or network exception occurs
   */
-  HRegionLocation locateRegion(final byte[] regionName)
+  public HRegionLocation locateRegion(final byte [] regionName)
    throws IOException;

  /**
@@ -201,7 +198,7 @@ public interface HConnection extends Abortable, Closeable {
   * @return list of region locations for all regions of table
   * @throws IOException
   */
-  List<HRegionLocation> locateRegions(final byte[] tableName)
+  public List<HRegionLocation> locateRegions(final byte[] tableName)
    throws IOException;

  /**
@@ -213,19 +210,18 @@ public interface HConnection extends Abortable, Closeable {
   * @return list of region locations for all regions of table
   * @throws IOException
   */
-  List<HRegionLocation> locateRegions(
-    final byte[] tableName, final boolean useCache, final boolean offlined
-  ) throws IOException;
+  public List<HRegionLocation> locateRegions(final byte[] tableName, final boolean useCache,
+      final boolean offlined) throws IOException;

  /**
   * Returns a {@link MasterAdminKeepAliveConnection} to the active master
   */
-  MasterAdminService.BlockingInterface getMasterAdmin() throws IOException;
+  public MasterAdminService.BlockingInterface getMasterAdmin() throws IOException;

  /**
   * Returns an {@link MasterMonitorKeepAliveConnection} to the active master
   */
-  MasterMonitorService.BlockingInterface getMasterMonitor() throws IOException;
+  public MasterMonitorService.BlockingInterface getMasterMonitor() throws IOException;

  /**
   * Establishes a connection to the region server at the specified address.
@@ -233,7 +229,7 @@ public interface HConnection extends Abortable, Closeable {
   * @return proxy for HRegionServer
   * @throws IOException if a remote or network exception occurs
   */
-  AdminService.BlockingInterface getAdmin(final ServerName serverName) throws IOException;
+  public AdminService.BlockingInterface getAdmin(final ServerName serverName) throws IOException;

  /**
   * Establishes a connection to the region server at the specified address, and returns
@@ -244,7 +240,7 @@ public interface HConnection extends Abortable, Closeable {
   * @throws IOException if a remote or network exception occurs
   *
   */
-  ClientService.BlockingInterface getClient(final ServerName serverName) throws IOException;
+  public ClientService.BlockingInterface getClient(final ServerName serverName) throws IOException;

  /**
   * Establishes a connection to the region server at the specified address.
@@ -254,7 +250,7 @@ public interface HConnection extends Abortable, Closeable {
   * @throws IOException if a remote or network exception occurs
   * @deprecated You can pass master flag but nothing special is done.
   */
-  AdminService.BlockingInterface getAdmin(final ServerName serverName, boolean getMaster)
+  public AdminService.BlockingInterface getAdmin(final ServerName serverName, boolean getMaster)
    throws IOException;

  /**
@@ -281,7 +277,7 @@ public interface HConnection extends Abortable, Closeable {
   * @throws RuntimeException other unspecified error
   */
  @Deprecated
-  <T> T getRegionServerWithRetries(ServerCallable<T> callable)
+  public <T> T getRegionServerWithRetries(ServerCallable<T> callable)
    throws IOException, RuntimeException;

  /**
@@ -294,7 +290,7 @@ public interface HConnection extends Abortable, Closeable {
   * @throws RuntimeException other unspecified error
   */
  @Deprecated
-  <T> T getRegionServerWithoutRetries(ServerCallable<T> callable)
+  public <T> T getRegionServerWithoutRetries(ServerCallable<T> callable)
    throws IOException, RuntimeException;

  /**
@@ -313,9 +309,8 @@ public interface HConnection extends Abortable, Closeable {
   * @deprecated since 0.96 - Use {@link HTableInterface#batch} instead
   */
  @Deprecated
-  void processBatch(
-    List<? extends Row> actions, final byte[] tableName, ExecutorService pool, Object[] results
-  )
+  public void processBatch(List<? extends Row> actions, final byte[] tableName,
+      ExecutorService pool, Object[] results)
    throws IOException, InterruptedException;

  /**
@@ -324,13 +319,11 @@ public interface HConnection extends Abortable, Closeable {
   * @deprecated since 0.96 - Use {@link HTableInterface#batchCallback} instead
   */
  @Deprecated
-  <R> void processBatchCallback(
-    List<? extends Row> list,
+  public <R> void processBatchCallback(List<? extends Row> list,
    byte[] tableName,
    ExecutorService pool,
    Object[] results,
-    Batch.Callback<R> callback
-  ) throws IOException, InterruptedException;
+    Batch.Callback<R> callback) throws IOException, InterruptedException;

  /**
   * Enable or disable region cache prefetch for the table. It will be
@@ -339,9 +332,8 @@ public interface HConnection extends Abortable, Closeable {
   * @param tableName name of table to configure.
   * @param enable Set to true to enable region cache prefetch.
   */
-  void setRegionCachePrefetch(
-    final byte[] tableName, final boolean enable
-  );
+  public void setRegionCachePrefetch(final byte[] tableName,
+      final boolean enable);

  /**
   * Check whether region cache prefetch is enabled or not.
@@ -349,34 +341,34 @@ public interface HConnection extends Abortable, Closeable {
   * @return true if table's region cache prefetch is enabled. Otherwise
   * it is disabled.
   */
-  boolean getRegionCachePrefetch(final byte[] tableName);
+  public boolean getRegionCachePrefetch(final byte[] tableName);

  /**
   * @return the number of region servers that are currently running
   * @throws IOException if a remote or network exception occurs
   * @deprecated This method will be changed from public to package protected.
   */
-  int getCurrentNrHRS() throws IOException;
+  public int getCurrentNrHRS() throws IOException;

  /**
   * @param tableNames List of table names
   * @return HTD[] table metadata
   * @throws IOException if a remote or network exception occurs
   */
-  HTableDescriptor[] getHTableDescriptors(List<String> tableNames)
+  public HTableDescriptor[] getHTableDescriptors(List<String> tableNames)
    throws IOException;

  /**
   * @return true if this connection is closed
   */
-  boolean isClosed();
+  public boolean isClosed();


  /**
   * Clear any caches that pertain to server name <code>sn</code>.
   * @param sn A server name
   */
-  void clearCaches(final ServerName sn);
+  public void clearCaches(final ServerName sn);

  /**
   * This function allows HBaseAdmin and potentially others to get a shared MasterMonitor
@@ -385,7 +377,7 @@ public interface HConnection extends Abortable, Closeable {
   * @throws MasterNotRunningException
   */
  // TODO: Why is this in the public interface when the returned type is shutdown package access?
-  MasterMonitorKeepAliveConnection getKeepAliveMasterMonitorService()
+  public MasterMonitorKeepAliveConnection getKeepAliveMasterMonitorService()
    throws MasterNotRunningException;

  /**
@@ -395,11 +387,11 @@ public interface HConnection extends Abortable, Closeable {
   * @throws MasterNotRunningException
   */
  // TODO: Why is this in the public interface when the returned type is shutdown package access?
-  MasterAdminKeepAliveConnection getKeepAliveMasterAdminService() throws MasterNotRunningException;
+  public MasterAdminKeepAliveConnection getKeepAliveMasterAdminService() throws MasterNotRunningException;

  /**
   * @param serverName
   * @return true if the server is known as dead, false otherwise.
   */
-  boolean isDeadServer(ServerName serverName);
+  public boolean isDeadServer(ServerName serverName);
}
@@ -127,9 +127,8 @@ public interface HTableInterface extends Closeable {
   * Same as {@link #batch(List, Object[])}, but with a callback.
   * @since 0.96.0
   */
-  <R> void batchCallback(
-    final List<? extends Row> actions, final Object[] results, final Batch.Callback<R> callback
-  )
+  public <R> void batchCallback(
+    final List<? extends Row> actions, final Object[] results, final Batch.Callback<R> callback)
    throws IOException, InterruptedException;


@@ -137,9 +136,8 @@ public interface HTableInterface extends Closeable {
   * Same as {@link #batch(List)}, but with a callback.
   * @since 0.96.0
   */
-  <R> Object[] batchCallback(
-    List<? extends Row> actions, Batch.Callback<R> callback
-  ) throws IOException,
+  public <R> Object[] batchCallback(
+    List<? extends Row> actions, Batch.Callback<R> callback) throws IOException,
    InterruptedException;

  /**
@@ -311,7 +309,7 @@ public interface HTableInterface extends Closeable {
   * @param rm object that specifies the set of mutations to perform atomically
   * @throws IOException
   */
-  void mutateRow(final RowMutations rm) throws IOException;
+  public void mutateRow(final RowMutations rm) throws IOException;

  /**
   * Appends values to one or more columns within a single row.
@@ -326,7 +324,7 @@ public interface HTableInterface extends Closeable {
   * @throws IOException e
   * @return values of columns after the append operation (maybe null)
   */
-  Result append(final Append append) throws IOException;
+  public Result append(final Append append) throws IOException;

  /**
   * Increments one or more columns within a single row.
@@ -341,7 +339,7 @@ public interface HTableInterface extends Closeable {
   * @throws IOException e
   * @return values of columns after the increment
   */
-  Result increment(final Increment increment) throws IOException;
+  public Result increment(final Increment increment) throws IOException;

  /**
   * See {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)}
@@ -495,7 +493,7 @@ public interface HTableInterface extends Closeable {
   * @param autoFlush
   * Whether or not to enable 'auto-flush'.
   */
-  void setAutoFlush(boolean autoFlush);
+  public void setAutoFlush(boolean autoFlush);

  /**
   * Turns 'auto-flush' on or off.
@@ -524,7 +522,7 @@ public interface HTableInterface extends Closeable {
   * Whether to keep Put failures in the writeBuffer
   * @see #flushCommits
   */
-  void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail);
+  public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail);

  /**
   * Returns the maximum size in bytes of the write buffer for this HTable.
@@ -533,7 +531,7 @@ public interface HTableInterface extends Closeable {
   * {@code hbase.client.write.buffer}.
   * @return The size of the write buffer in bytes.
   */
-  long getWriteBufferSize();
+  public long getWriteBufferSize();

  /**
   * Sets the size of the buffer in bytes.
@@ -543,5 +541,5 @@ public interface HTableInterface extends Closeable {
   * @param writeBufferSize The new write buffer size, in bytes.
   * @throws IOException if a remote or network exception occurs.
   */
-  void setWriteBufferSize(long writeBufferSize) throws IOException;
+  public void setWriteBufferSize(long writeBufferSize) throws IOException;
}
@@ -40,5 +40,5 @@ extends MasterAdminProtos.MasterAdminService.BlockingInterface {
   */
  // The Closeable Interface wants to throw an IOE out of a close.
  // Thats a PITA. Do this below instead of Closeable.
-  void close();
+  public void close();
}
@@ -297,7 +297,7 @@ public class MetaScanner {
     * @return A boolean to know if it should continue to loop in the region
     * @throws IOException e
     */
-    boolean processRow(Result rowResult) throws IOException;
+    public boolean processRow(Result rowResult) throws IOException;
  }

  public static abstract class MetaScannerVisitorBase implements MetaScannerVisitor {
@@ -38,17 +38,17 @@ public interface ResultScanner extends Closeable, Iterable<Result> {
   * exhausted.
   * @throws IOException e
   */
-  Result next() throws IOException;
+  public Result next() throws IOException;

  /**
   * @param nbRows number of rows to return
   * @return Between zero and <param>nbRows</param> Results
   * @throws IOException e
   */
-  Result [] next(int nbRows) throws IOException;
+  public Result [] next(int nbRows) throws IOException;

  /**
   * Closes the scanner and releases any resources it has allocated
   */
-  void close();
+  public void close();
}
@@ -30,5 +30,5 @@ public interface Row extends Comparable<Row> {
  /**
   * @return The row.
   */
-  byte [] getRow();
+  public byte [] getRow();
}
@@ -50,8 +50,8 @@ public abstract class Batch {
   * {@link Batch.Call#call(Object)}
   * @param <R> the return type from {@link Batch.Call#call(Object)}
   */
-  public interface Call<T,R> {
-    R call(T instance) throws IOException;
+  public static interface Call<T,R> {
+    public R call(T instance) throws IOException;
  }

  /**
@@ -68,7 +68,7 @@ public abstract class Batch {
   * @param <R> the return type from the associated {@link Batch.Call#call(Object)}
   * @see org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)
   */
-  public interface Callback<R> {
-    void update(byte[] region, byte[] row, R result);
+  public static interface Callback<R> {
+    public void update(byte[] region, byte[] row, R result);
  }
}
@@ -46,7 +46,7 @@ public interface ReplicationPeers {
   * Initialize the ReplicationPeers interface.
   * @throws KeeperException
   */
-  void init() throws IOException, KeeperException;
+  public void init() throws IOException, KeeperException;

  /**
   * Add a new remote slave cluster for replication.
@@ -54,65 +54,65 @@ public interface ReplicationPeers {
   * @param clusterKey the concatenation of the slave cluster's:
   * hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
   */
-  void addPeer(String peerId, String clusterKey) throws IOException;
+  public void addPeer(String peerId, String clusterKey) throws IOException;

  /**
   * Removes a remote slave cluster and stops the replication to it.
   * @param peerId a short that identifies the cluster
   */
-  void removePeer(String peerId) throws IOException;
+  public void removePeer(String peerId) throws IOException;

  /**
   * Restart the replication to the specified remote slave cluster.
   * @param peerId a short that identifies the cluster
   */
-  void enablePeer(String peerId) throws IOException;
+  public void enablePeer(String peerId) throws IOException;

  /**
   * Stop the replication to the specified remote slave cluster.
   * @param peerId a short that identifies the cluster
   */
-  void disablePeer(String peerId) throws IOException;
+  public void disablePeer(String peerId) throws IOException;

  /**
   * Get the replication status for the specified connected remote slave cluster.
   * @param peerId a short that identifies the cluster
   * @return true if replication is enabled, false otherwise.
   */
-  boolean getStatusOfConnectedPeer(String peerId);
+  public boolean getStatusOfConnectedPeer(String peerId);

  /**
   * Get a set of all connected remote slave clusters.
   * @return set of peer ids
   */
-  Set<String> getConnectedPeers();
+  public Set<String> getConnectedPeers();

  /**
   * List the cluster keys of all remote slave clusters (whether they are enabled/disabled or
   * connected/disconnected).
   * @return A map of peer ids to peer cluster keys
   */
-  Map<String, String> getAllPeerClusterKeys();
+  public Map<String, String> getAllPeerClusterKeys();

  /**
   * List the peer ids of all remote slave clusters (whether they are enabled/disabled or
   * connected/disconnected).
   * @return A list of peer ids
   */
-  List<String> getAllPeerIds();
+  public List<String> getAllPeerIds();

  /**
   * Attempt to connect to a new remote slave cluster.
   * @param peerId a short that identifies the cluster
   * @return true if a new connection was made, false if no new connection was made.
   */
-  boolean connectToPeer(String peerId) throws IOException, KeeperException;
+  public boolean connectToPeer(String peerId) throws IOException, KeeperException;

  /**
   * Disconnect from a remote slave cluster.
   * @param peerId a short that identifies the cluster
   */
-  void disconnectFromPeer(String peerId);
+  public void disconnectFromPeer(String peerId);

  /**
   * Returns all region servers from given connected remote slave cluster.
@@ -120,19 +120,19 @@ public interface ReplicationPeers {
   * @return addresses of all region servers in the peer cluster. Returns an empty list if the peer
   * cluster is unavailable or there are no region servers in the cluster.
   */
-  List<ServerName> getRegionServersOfConnectedPeer(String peerId);
+  public List<ServerName> getRegionServersOfConnectedPeer(String peerId);

  /**
   * Returns the UUID of the provided peer id.
   * @param peerId the peer's ID that will be converted into a UUID
   * @return a UUID or null if the peer cluster does not exist or is not connected.
   */
-  UUID getPeerUUID(String peerId);
+  public UUID getPeerUUID(String peerId);

  /**
   * Returns the configuration needed to talk to the remote slave cluster.
   * @param peerId a short that identifies the cluster
   * @return the configuration for the peer cluster, null if it was unable to get the configuration
   */
-  Configuration getPeerConf(String peerId) throws KeeperException;
+  public Configuration getPeerConf(String peerId) throws KeeperException;
}
@@ -37,13 +37,13 @@ public interface ReplicationQueues {
   * @param serverName The server name of the region server that owns the replication queues this
   * interface manages.
   */
-  void init(String serverName) throws KeeperException;
+  public void init(String serverName) throws KeeperException;

  /**
   * Remove a replication queue.
   * @param queueId a String that identifies the queue.
   */
-  void removeQueue(String queueId);
+  public void removeQueue(String queueId);

  /**
   * Add a new HLog file to the given queue. If the queue does not exist it is created.
@@ -51,14 +51,14 @@ public interface ReplicationQueues {
   * @param filename name of the HLog
   * @throws KeeperException
   */
-  void addLog(String queueId, String filename) throws KeeperException;
+  public void addLog(String queueId, String filename) throws KeeperException;

  /**
   * Remove an HLog file from the given queue.
   * @param queueId a String that identifies the queue.
   * @param filename name of the HLog
   */
-  void removeLog(String queueId, String filename);
+  public void removeLog(String queueId, String filename);

  /**
   * Set the current position for a specific HLog in a given queue.
@@ -66,7 +66,7 @@ public interface ReplicationQueues {
   * @param filename name of the HLog
   * @param position the current position in the file
   */
-  void setLogPosition(String queueId, String filename, long position);
+  public void setLogPosition(String queueId, String filename, long position);

  /**
   * Get the current position for a specific HLog in a given queue.
@@ -74,25 +74,25 @@ public interface ReplicationQueues {
   * @param filename name of the HLog
   * @return the current position in the file
   */
-  long getLogPosition(String queueId, String filename) throws KeeperException;
+  public long getLogPosition(String queueId, String filename) throws KeeperException;

  /**
   * Remove all replication queues for this region server.
   */
-  void removeAllQueues();
+  public void removeAllQueues();

  /**
   * Get a list of all HLogs in the given queue.
   * @param queueId a String that identifies the queue
   * @return a list of HLogs, null if this region server is dead and has no outstanding queues
   */
-  List<String> getLogsInQueue(String queueId);
+  public List<String> getLogsInQueue(String queueId);

  /**
   * Get a list of all queues for this region server.
   * @return a list of queueIds, null if this region server is dead and has no outstanding queues
   */
-  List<String> getAllQueues();
+  public List<String> getAllQueues();

  /**
   * Take ownership for the set of queues belonging to a dead region server.
@@ -100,12 +100,12 @@ public interface ReplicationQueues {
   * @return A SortedMap of the queues that have been claimed, including a SortedSet of HLogs in
   * each queue. Returns an empty map if no queues were failed-over.
   */
-  SortedMap<String, SortedSet<String>> claimQueues(String regionserver);
+  public SortedMap<String, SortedSet<String>> claimQueues(String regionserver);

  /**
   * Get a list of all region servers that have outstanding replication queues. These servers could
   * be alive, dead or from a previous run of the cluster.
   * @return a list of server names
   */
-  List<String> getListOfReplicators();
+  public List<String> getListOfReplicators();
}
@@ -31,7 +31,7 @@ public interface ReplicationQueuesClient {
   * be alive, dead or from a previous run of the cluster.
   * @return a list of server names
   */
-  List<String> getListOfReplicators();
+  public List<String> getListOfReplicators();

  /**
   * Get a list of all HLogs in the given queue on the given region server.
@@ -39,12 +39,12 @@ public interface ReplicationQueuesClient {
   * @param queueId a String that identifies the queue
   * @return a list of HLogs, null if this region server is dead and has no outstanding queues
   */
-  List<String> getLogsInQueue(String serverName, String queueId);
+  public List<String> getLogsInQueue(String serverName, String queueId);

  /**
   * Get a list of all queues for the specified region server.
   * @param serverName the server name of the region server that owns the set of queues
   * @return a list of queueIds, null if this region server is not a replicator.
   */
-  List<String> getAllQueues(String serverName);
+  public List<String> getAllQueues(String serverName);
}
@@ -216,17 +216,17 @@ public class PoolMap<K, V> implements Map<K, V> {
  }

  protected interface Pool<R> {
-    R get();
+    public R get();

-    R put(R resource);
+    public R put(R resource);

-    boolean remove(R resource);
+    public boolean remove(R resource);

-    void clear();
+    public void clear();

-    Collection<R> values();
+    public Collection<R> values();

-    int size();
+    public int size();
  }

  public enum PoolType {
@@ -66,7 +66,7 @@ public class CompoundConfiguration extends Configuration {

  // Devs: these APIs are the same contract as their counterparts in
  // Configuration.java
-  private interface ImmutableConfigMap extends Iterable<Map.Entry<String,String>> {
+  private static interface ImmutableConfigMap extends Iterable<Map.Entry<String,String>> {
    String get(String key);
    String getRaw(String key);
    Class<?> getClassByName(String name) throws ClassNotFoundException;
@@ -2514,14 +2514,14 @@ public class KeyValue implements Cell, HeapSize, Cloneable {
  /**
   * Avoids redundant comparisons for better performance.
   */
-  public interface SamePrefixComparator<T> {
+  public static interface SamePrefixComparator<T> {
    /**
     * Compare two keys assuming that the first n bytes are the same.
     * @param commonPrefix How many bytes are the same.
     */
-    int compareIgnoringPrefix(
-      int commonPrefix, T left, int loffset, int llength, T right, int roffset, int rlength
-    );
+    public int compareIgnoringPrefix(int commonPrefix,
+        T left, int loffset, int llength,
+        T right, int roffset, int rlength);
  }

  /**
@@ -37,14 +37,14 @@ public interface Codec {
   * Call flush when done. Some encoders may not put anything on the stream until flush is called.
   * On flush, let go of any resources used by the encoder.
   */
-  interface Encoder extends CellOutputStream {}
+  public interface Encoder extends CellOutputStream {}

  /**
   * Implementations should implicitly clean up any resources allocated when the
   * Decoder/CellScanner runs off the end of the cell block. Do this rather than require the user
   * call close explicitly.
   */
-  interface Decoder extends CellScanner {};
+  public interface Decoder extends CellScanner {};

  Decoder getDecoder(InputStream is);
  Encoder getEncoder(OutputStream os);
@@ -45,5 +45,5 @@ public interface HeapSize {
   * @return Approximate 'exclusive deep size' of implementing object. Includes
   * count of payload and hosting object sizings.
   */
-  long heapSize();
+  public long heapSize();
}
@@ -57,9 +57,9 @@ public interface DataBlockEncoder {
   * @throws IOException
   * If there is an error writing to output stream.
   */
-  void encodeKeyValues(
-    ByteBuffer in, boolean includesMemstoreTS, HFileBlockEncodingContext encodingContext
-  ) throws IOException;
+  public void encodeKeyValues(
+    ByteBuffer in, boolean includesMemstoreTS,
+    HFileBlockEncodingContext encodingContext) throws IOException;

  /**
   * Decode.
@@ -69,9 +69,8 @@ public interface DataBlockEncoder {
   * @return Uncompressed block of KeyValues.
   * @throws IOException If there is an error in source.
   */
-  ByteBuffer decodeKeyValues(
-    DataInputStream source, boolean includesMemstoreTS
-  ) throws IOException;
+  public ByteBuffer decodeKeyValues(DataInputStream source,
+      boolean includesMemstoreTS) throws IOException;

  /**
   * Uncompress.
@@ -83,9 +82,8 @@ public interface DataBlockEncoder {
   * @return Uncompressed block of KeyValues.
   * @throws IOException If there is an error in source.
   */
-  ByteBuffer decodeKeyValues(
-    DataInputStream source, int allocateHeaderLength, int skipLastBytes, boolean includesMemstoreTS
-  )
+  public ByteBuffer decodeKeyValues(DataInputStream source,
+      int allocateHeaderLength, int skipLastBytes, boolean includesMemstoreTS)
    throws IOException;

  /**
@@ -96,7 +94,7 @@ public interface DataBlockEncoder {
   * @param block encoded block we want index, the position will not change
   * @return First key in block.
   */
-  ByteBuffer getFirstKeyInBlock(ByteBuffer block);
+  public ByteBuffer getFirstKeyInBlock(ByteBuffer block);

  /**
   * Create a HFileBlock seeker which find KeyValues within a block.
@@ -105,9 +103,8 @@ public interface DataBlockEncoder {
   * key-value pair
   * @return A newly created seeker.
   */
-  EncodedSeeker createSeeker(
-    RawComparator<byte[]> comparator, boolean includesMemstoreTS
-  );
+  public EncodedSeeker createSeeker(RawComparator<byte[]> comparator,
+      boolean includesMemstoreTS);

  /**
   * Creates a encoder specific encoding context
@@ -122,9 +119,9 @@ public interface DataBlockEncoder {
   * is unknown
   * @return a newly created encoding context
   */
-  HFileBlockEncodingContext newDataBlockEncodingContext(
-    Algorithm compressionAlgorithm, DataBlockEncoding encoding, byte[] headerBytes
-  );
+  public HFileBlockEncodingContext newDataBlockEncodingContext(
+    Algorithm compressionAlgorithm, DataBlockEncoding encoding,
+    byte[] headerBytes);

  /**
   * Creates an encoder specific decoding context, which will prepare the data
@@ -134,9 +131,8 @@ public interface DataBlockEncoder {
   * compression algorithm used if the data needs to be decompressed
   * @return a newly created decoding context
   */
-  HFileBlockDecodingContext newDataBlockDecodingContext(
-    Algorithm compressionAlgorithm
-  );
+  public HFileBlockDecodingContext newDataBlockDecodingContext(
+    Algorithm compressionAlgorithm);

  /**
   * An interface which enable to seek while underlying data is encoded.
@@ -144,19 +140,19 @@ public interface DataBlockEncoder {
   * It works on one HFileBlock, but it is reusable. See
   * {@link #setCurrentBuffer(ByteBuffer)}.
   */
-  interface EncodedSeeker {
+  public static interface EncodedSeeker {
    /**
     * Set on which buffer there will be done seeking.
     * @param buffer Used for seeking.
     */
-    void setCurrentBuffer(ByteBuffer buffer);
+    public void setCurrentBuffer(ByteBuffer buffer);

    /**
     * Does a deep copy of the key at the current position. A deep copy is
     * necessary because buffers are reused in the decoder.
     * @return key at current position
     */
-    ByteBuffer getKeyDeepCopy();
+    public ByteBuffer getKeyDeepCopy();

    /**
     * Does a shallow copy of the value at the current position. A shallow
@@ -164,25 +160,25 @@ public interface DataBlockEncoder {
     * of the original encoded buffer.
     * @return value at current position
     */
-    ByteBuffer getValueShallowCopy();
+    public ByteBuffer getValueShallowCopy();

    /** @return key value at current position with position set to limit */
-    ByteBuffer getKeyValueBuffer();
+    public ByteBuffer getKeyValueBuffer();

    /**
     * @return the KeyValue object at the current position. Includes memstore
     * timestamp.
     */
-    KeyValue getKeyValue();
+    public KeyValue getKeyValue();

    /** Set position to beginning of given block */
-    void rewind();
+    public void rewind();

    /**
     * Move to next position
     * @return true on success, false if there is no more positions.
     */
-    boolean next();
+    public boolean next();

    /**
     * Moves the seeker position within the current block to:
@@ -201,8 +197,7 @@ public interface DataBlockEncoder {
     * of an exact match. Does not matter in case of an inexact match.
     * @return 0 on exact match, 1 on inexact match.
     */
-    int seekToKeyInBlock(
-      byte[] key, int offset, int length, boolean seekBefore
-    );
+    public int seekToKeyInBlock(byte[] key, int offset, int length,
+        boolean seekBefore);
  }
}
@@ -32,7 +32,7 @@ public interface HFileBlockDecodingContext {
  /**
   * @return the compression algorithm used by this decoding context
   */
-  Compression.Algorithm getCompression();
+  public Compression.Algorithm getCompression();

  /**
   * Perform all actions that need to be done before the encoder's real decoding process.
@@ -47,12 +47,7 @@ public interface HFileBlockDecodingContext {
   * @param offset data start offset in onDiskBlock
   * @throws IOException
   */
-  void prepareDecoding(
-    int onDiskSizeWithoutHeader,
-    int uncompressedSizeWithoutHeader,
-    ByteBuffer blockBufferWithoutHeader,
-    byte[] onDiskBlock,
-    int offset
-  ) throws IOException;
+  public void prepareDecoding(int onDiskSizeWithoutHeader, int uncompressedSizeWithoutHeader,
+      ByteBuffer blockBufferWithoutHeader, byte[] onDiskBlock, int offset) throws IOException;

}
@@ -34,39 +34,39 @@ public interface HFileBlockEncodingContext {
  /**
   * @return OutputStream to which encoded data is written
   */
-  OutputStream getOutputStreamForEncoder();
+  public OutputStream getOutputStreamForEncoder();

  /**
   * @return encoded and compressed bytes with header which are ready to write
   * out to disk
   */
-  byte[] getOnDiskBytesWithHeader();
+  public byte[] getOnDiskBytesWithHeader();

  /**
   * @return encoded but not heavily compressed bytes with header which can be
   * cached in block cache
   */
-  byte[] getUncompressedBytesWithHeader();
+  public byte[] getUncompressedBytesWithHeader();

  /**
   * @return the block type after encoding
   */
-  BlockType getBlockType();
+  public BlockType getBlockType();

  /**
   * @return the compression algorithm used by this encoding context
   */
-  Compression.Algorithm getCompression();
+  public Compression.Algorithm getCompression();

  /**
   * sets the dummy header bytes
   */
-  void setDummyHeader(byte[] headerBytes);
+  public void setDummyHeader(byte[] headerBytes);

  /**
   * @return the {@link DataBlockEncoding} encoding used
   */
-  DataBlockEncoding getDataBlockEncoding();
+  public DataBlockEncoding getDataBlockEncoding();

  /**
   * Do any action that needs to be performed after the encoding.
@@ -76,11 +76,11 @@ public interface HFileBlockEncodingContext {
   * @param blockType
   * @throws IOException
   */
-  void postEncoding(BlockType blockType) throws IOException;
+  public void postEncoding(BlockType blockType) throws IOException;

  /**
   * Releases the resources used.
   */
-  void close();
+  public void close();

}
@@ -1000,9 +1000,8 @@ public class Bytes {
  }

  interface Comparer<T> {
-    int compareTo(
-      T buffer1, int offset1, int length1, T buffer2, int offset2, int length2
-    );
+    abstract public int compareTo(T buffer1, int offset1, int length1,
+        T buffer2, int offset2, int length2);
  }

  @VisibleForTesting
@@ -50,16 +50,16 @@ public class ClassFinder {
  private ClassFilter classFilter;
  private FileFilter fileFilter;

-  public interface ResourcePathFilter {
-    boolean isCandidatePath(String resourcePath, boolean isJar);
+  public static interface ResourcePathFilter {
+    public boolean isCandidatePath(String resourcePath, boolean isJar);
  };

-  public interface FileNameFilter {
-    boolean isCandidateFile(String fileName, String absFilePath);
+  public static interface FileNameFilter {
+    public boolean isCandidateFile(String fileName, String absFilePath);
  };

-  public interface ClassFilter {
-    boolean isCandidateClass(Class<?> c);
+  public static interface ClassFilter {
+    public boolean isCandidateClass(Class<?> c);
  };

  public ClassFinder() {
@@ -86,14 +86,14 @@ public final class Waiter {
   * {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate) methods.
   */
  @InterfaceAudience.Private
-  public interface Predicate<E extends Exception> {
+  public static interface Predicate<E extends Exception> {

    /**
     * Perform a predicate evaluation.
     * @return the boolean result of the evaluation.
     * @throws Exception thrown if the predicate evaluation could not evaluate.
     */
-    boolean evaluate() throws E;
+    public boolean evaluate() throws E;

  }

@@ -22,37 +22,37 @@ package org.apache.hadoop.hbase.ipc;
import org.apache.hadoop.hbase.metrics.BaseSource;

public interface MetricsHBaseServerSource extends BaseSource {
-  String AUTHORIZATION_SUCCESSES_NAME = "authorizationSuccesses";
-  String AUTHORIZATION_SUCCESSES_DESC =
+  public static final String AUTHORIZATION_SUCCESSES_NAME = "authorizationSuccesses";
+  public static final String AUTHORIZATION_SUCCESSES_DESC =
      "Number of authorization successes.";
-  String AUTHORIZATION_FAILURES_NAME = "authorizationFailures";
-  String AUTHORIZATION_FAILURES_DESC =
+  public static final String AUTHORIZATION_FAILURES_NAME = "authorizationFailures";
+  public static final String AUTHORIZATION_FAILURES_DESC =
      "Number of authorization failures.";
-  String AUTHENTICATION_SUCCESSES_NAME = "authenticationSuccesses";
-  String AUTHENTICATION_SUCCESSES_DESC =
+  public static final String AUTHENTICATION_SUCCESSES_NAME = "authenticationSuccesses";
+  public static final String AUTHENTICATION_SUCCESSES_DESC =
      "Number of authentication successes.";
-  String AUTHENTICATION_FAILURES_NAME = "authenticationFailures";
-  String AUTHENTICATION_FAILURES_DESC =
+  public static final String AUTHENTICATION_FAILURES_NAME = "authenticationFailures";
+  public static final String AUTHENTICATION_FAILURES_DESC =
      "Number of authentication failures.";
-  String SENT_BYTES_NAME = "sentBytes";
-  String SENT_BYTES_DESC = "Number of bytes sent.";
-  String RECEIVED_BYTES_NAME = "receivedBytes";
-  String RECEIVED_BYTES_DESC = "Number of bytes received.";
-  String QUEUE_CALL_TIME_NAME = "queueCallTime";
-  String QUEUE_CALL_TIME_DESC = "Queue Call Time.";
-  String PROCESS_CALL_TIME_NAME = "processCallTime";
-  String PROCESS_CALL_TIME_DESC = "Processing call time.";
-  String QUEUE_SIZE_NAME = "queueSize";
-  String QUEUE_SIZE_DESC = "Number of bytes in the call queues.";
-  String GENERAL_QUEUE_NAME = "numCallsInGeneralQueue";
-  String GENERAL_QUEUE_DESC = "Number of calls in the general call queue.";
-  String PRIORITY_QUEUE_NAME = "numCallsInPriorityQueue";
-  String REPLICATION_QUEUE_NAME = "numCallsInReplicationQueue";
-  String REPLICATION_QUEUE_DESC =
+  public static final String SENT_BYTES_NAME = "sentBytes";
+  public static final String SENT_BYTES_DESC = "Number of bytes sent.";
+  public static final String RECEIVED_BYTES_NAME = "receivedBytes";
+  public static final String RECEIVED_BYTES_DESC = "Number of bytes received.";
+  public static final String QUEUE_CALL_TIME_NAME = "queueCallTime";
+  public static final String QUEUE_CALL_TIME_DESC = "Queue Call Time.";
+  public static final String PROCESS_CALL_TIME_NAME = "processCallTime";
+  public static final String PROCESS_CALL_TIME_DESC = "Processing call time.";
+  public static final String QUEUE_SIZE_NAME = "queueSize";
+  public static final String QUEUE_SIZE_DESC = "Number of bytes in the call queues.";
+  public static final String GENERAL_QUEUE_NAME = "numCallsInGeneralQueue";
+  public static final String GENERAL_QUEUE_DESC = "Number of calls in the general call queue.";
+  public static final String PRIORITY_QUEUE_NAME = "numCallsInPriorityQueue";
+  public static final String REPLICATION_QUEUE_NAME = "numCallsInReplicationQueue";
+  public static final String REPLICATION_QUEUE_DESC =
      "Number of calls in the replication call queue.";
-  String PRIORITY_QUEUE_DESC = "Number of calls in the priority call queue.";
-  String NUM_OPEN_CONNECTIONS_NAME = "numOpenConnections";
-  String NUM_OPEN_CONNECTIONS_DESC = "Number of open connections.";
+  public static final String PRIORITY_QUEUE_DESC = "Number of calls in the priority call queue.";
+  public static final String NUM_OPEN_CONNECTIONS_NAME = "numOpenConnections";
+  public static final String NUM_OPEN_CONNECTIONS_DESC = "Number of open connections.";

  void authorizationSuccess();

@ -28,60 +28,60 @@ public interface MetricsMasterSource extends BaseSource {
|
|||
/**
|
||||
* The name of the metrics
|
||||
*/
|
||||
String METRICS_NAME = "Server";
|
||||
static final String METRICS_NAME = "Server";
|
||||
|
||||
/**
|
||||
* The context metrics will be under.
|
||||
*/
|
||||
String METRICS_CONTEXT = "master";
|
||||
static final String METRICS_CONTEXT = "master";
|
||||
|
||||
/**
|
||||
* The name of the metrics context that metrics will be under in jmx
|
||||
*/
|
||||
String METRICS_JMX_CONTEXT = "Master,sub=" + METRICS_NAME;
|
||||
static final String METRICS_JMX_CONTEXT = "Master,sub=" + METRICS_NAME;
|
||||
|
||||
/**
|
||||
* Description
|
||||
*/
|
||||
String METRICS_DESCRIPTION = "Metrics about HBase master server";
|
||||
static final String METRICS_DESCRIPTION = "Metrics about HBase master server";
|
||||
|
||||
// Strings used for exporting to metrics system.
|
||||
String MASTER_ACTIVE_TIME_NAME = "masterActiveTime";
|
||||
String MASTER_START_TIME_NAME = "masterStartTime";
|
||||
String AVERAGE_LOAD_NAME = "averageLoad";
|
||||
String NUM_REGION_SERVERS_NAME = "numRegionServers";
|
||||
String NUM_DEAD_REGION_SERVERS_NAME = "numDeadRegionServers";
|
||||
String ZOOKEEPER_QUORUM_NAME = "zookeeperQuorum";
|
||||
String SERVER_NAME_NAME = "serverName";
|
||||
String CLUSTER_ID_NAME = "clusterId";
|
||||
String IS_ACTIVE_MASTER_NAME = "isActiveMaster";
|
||||
String SPLIT_TIME_NAME = "hlogSplitTime";
|
||||
String SPLIT_SIZE_NAME = "hlogSplitSize";
|
||||
String SNAPSHOT_TIME_NAME = "snapshotTime";
|
||||
String SNAPSHOT_RESTORE_TIME_NAME = "snapshotRestoreTime";
|
||||
String SNAPSHOT_CLONE_TIME_NAME = "snapshotCloneTime";
|
||||
String META_SPLIT_TIME_NAME = "metaHlogSplitTime";
|
||||
String META_SPLIT_SIZE_NAME = "metaHlogSplitSize";
|
||||
String CLUSTER_REQUESTS_NAME = "clusterRequests";
|
||||
String RIT_COUNT_NAME = "ritCount";
|
||||
String RIT_COUNT_OVER_THRESHOLD_NAME = "ritCountOverThreshold";
|
||||
String RIT_OLDEST_AGE_NAME = "ritOldestAge";
|
||||
String MASTER_ACTIVE_TIME_DESC = "Master Active Time";
|
||||
String MASTER_START_TIME_DESC = "Master Start Time";
|
||||
String AVERAGE_LOAD_DESC = "AverageLoad";
|
||||
String NUMBER_OF_REGION_SERVERS_DESC = "Number of RegionServers";
|
||||
String NUMBER_OF_DEAD_REGION_SERVERS_DESC = "Number of dead RegionServers";
|
||||
String ZOOKEEPER_QUORUM_DESC = "Zookeeper Quorum";
|
||||
String SERVER_NAME_DESC = "Server Name";
|
||||
String CLUSTER_ID_DESC = "Cluster Id";
|
||||
String IS_ACTIVE_MASTER_DESC = "Is Active Master";
|
||||
String SPLIT_TIME_DESC = "Time it takes to finish HLog.splitLog()";
|
||||
String SPLIT_SIZE_DESC = "Size of HLog files being split";
|
||||
String SNAPSHOT_TIME_DESC = "Time it takes to finish snapshot()";
|
||||
String SNAPSHOT_RESTORE_TIME_DESC = "Time it takes to finish restoreSnapshot()";
|
||||
String SNAPSHOT_CLONE_TIME_DESC = "Time it takes to finish cloneSnapshot()";
|
||||
String META_SPLIT_TIME_DESC = "Time it takes to finish splitMetaLog()";
|
||||
String META_SPLIT_SIZE_DESC = "Size of META HLog files being split";
|
||||
static final String MASTER_ACTIVE_TIME_NAME = "masterActiveTime";
|
||||
static final String MASTER_START_TIME_NAME = "masterStartTime";
|
||||
static final String AVERAGE_LOAD_NAME = "averageLoad";
|
||||
static final String NUM_REGION_SERVERS_NAME = "numRegionServers";
|
||||
static final String NUM_DEAD_REGION_SERVERS_NAME = "numDeadRegionServers";
|
||||
static final String ZOOKEEPER_QUORUM_NAME = "zookeeperQuorum";
|
||||
static final String SERVER_NAME_NAME = "serverName";
|
||||
static final String CLUSTER_ID_NAME = "clusterId";
|
||||
static final String IS_ACTIVE_MASTER_NAME = "isActiveMaster";
|
||||
static final String SPLIT_TIME_NAME = "hlogSplitTime";
|
||||
static final String SPLIT_SIZE_NAME = "hlogSplitSize";
|
||||
static final String SNAPSHOT_TIME_NAME = "snapshotTime";
|
||||
static final String SNAPSHOT_RESTORE_TIME_NAME = "snapshotRestoreTime";
|
||||
static final String SNAPSHOT_CLONE_TIME_NAME = "snapshotCloneTime";
|
||||
static final String META_SPLIT_TIME_NAME = "metaHlogSplitTime";
|
||||
static final String META_SPLIT_SIZE_NAME = "metaHlogSplitSize";
|
||||
static final String CLUSTER_REQUESTS_NAME = "clusterRequests";
|
||||
static final String RIT_COUNT_NAME = "ritCount";
|
||||
static final String RIT_COUNT_OVER_THRESHOLD_NAME = "ritCountOverThreshold";
|
||||
static final String RIT_OLDEST_AGE_NAME = "ritOldestAge";
|
||||
static final String MASTER_ACTIVE_TIME_DESC = "Master Active Time";
|
||||
static final String MASTER_START_TIME_DESC = "Master Start Time";
|
||||
static final String AVERAGE_LOAD_DESC = "AverageLoad";
|
||||
static final String NUMBER_OF_REGION_SERVERS_DESC = "Number of RegionServers";
|
||||
static final String NUMBER_OF_DEAD_REGION_SERVERS_DESC = "Number of dead RegionServers";
|
||||
static final String ZOOKEEPER_QUORUM_DESC = "Zookeeper Quorum";
|
||||
static final String SERVER_NAME_DESC = "Server Name";
|
||||
static final String CLUSTER_ID_DESC = "Cluster Id";
|
||||
static final String IS_ACTIVE_MASTER_DESC = "Is Active Master";
|
||||
static final String SPLIT_TIME_DESC = "Time it takes to finish HLog.splitLog()";
|
||||
static final String SPLIT_SIZE_DESC = "Size of HLog files being split";
|
||||
static final String SNAPSHOT_TIME_DESC = "Time it takes to finish snapshot()";
|
||||
static final String SNAPSHOT_RESTORE_TIME_DESC = "Time it takes to finish restoreSnapshot()";
|
||||
static final String SNAPSHOT_CLONE_TIME_DESC = "Time it takes to finish cloneSnapshot()";
|
||||
static final String META_SPLIT_TIME_DESC = "Time it takes to finish splitMetaLog()";
|
||||
static final String META_SPLIT_SIZE_DESC = "Size of META HLog files being split";
|
||||
|
||||
/**
|
||||
* Increment the number of requests the cluster has seen.
|
||||
|
|
|
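A note on the constant declarations above: every field declared in a Java interface is implicitly public, static and final (JLS 9.3), so the modifiers this revert restores are redundant but harmless. A minimal sketch of the equivalence, using a hypothetical interface that is not part of this patch:

    import java.lang.reflect.Modifier;

    // Hypothetical example interface; both constants compile to identical
    // bytecode because interface fields are implicitly public static final.
    interface ExampleMetricsSource {
      String METRICS_NAME = "Example";                    // implicit modifiers
      public static final String METRICS_CONTEXT = "ex";  // explicit, redundant
    }

    public class ModifierCheck {
      public static void main(String[] args) throws Exception {
        int m = ExampleMetricsSource.class.getField("METRICS_NAME").getModifiers();
        // Prints "true true true": the bare declaration carries the same modifiers.
        System.out.println(Modifier.isPublic(m) + " " + Modifier.isStatic(m)
            + " " + Modifier.isFinal(m));
      }
    }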
@ -24,7 +24,7 @@ package org.apache.hadoop.hbase.metrics;
*/
public interface BaseSource {

String HBASE_METRICS_SYSTEM_NAME = "HBase";
public static final String HBASE_METRICS_SYSTEM_NAME = "HBase";

/**
* Clear out the metrics and re-prepare the source.
@ -29,22 +29,22 @@ public interface MetricsRegionAggregateSource extends BaseSource {
/**
* The name of the metrics
*/
String METRICS_NAME = "Regions";
static final String METRICS_NAME = "Regions";

/**
* The name of the metrics context that metrics will be under.
*/
String METRICS_CONTEXT = "regionserver";
static final String METRICS_CONTEXT = "regionserver";

/**
* Description
*/
String METRICS_DESCRIPTION = "Metrics about HBase RegionServer regions and tables";
static final String METRICS_DESCRIPTION = "Metrics about HBase RegionServer regions and tables";

/**
* The name of the metrics context that metrics will be under in jmx
*/
String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;
static final String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;

/**
* Register a MetricsRegionSource as being open.
@ -28,22 +28,22 @@ public interface MetricsRegionServerSource extends BaseSource {
/**
* The name of the metrics
*/
String METRICS_NAME = "Server";
static final String METRICS_NAME = "Server";

/**
* The name of the metrics context that metrics will be under.
*/
String METRICS_CONTEXT = "regionserver";
static final String METRICS_CONTEXT = "regionserver";

/**
* Description
*/
String METRICS_DESCRIPTION = "Metrics about HBase RegionServer";
static final String METRICS_DESCRIPTION = "Metrics about HBase RegionServer";

/**
* The name of the metrics context that metrics will be under in jmx
*/
String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;
static final String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;

/**
* Update the Put time histogram
@ -113,103 +113,103 @@ public interface MetricsRegionServerSource extends BaseSource {
void incrSlowAppend();

// Strings used for exporting to metrics system.
String REGION_COUNT = "regionCount";
String REGION_COUNT_DESC = "Number of regions";
String STORE_COUNT = "storeCount";
String STORE_COUNT_DESC = "Number of Stores";
String STOREFILE_COUNT = "storeFileCount";
String STOREFILE_COUNT_DESC = "Number of Store Files";
String MEMSTORE_SIZE = "memStoreSize";
String MEMSTORE_SIZE_DESC = "Size of the memstore";
String STOREFILE_SIZE = "storeFileSize";
String STOREFILE_SIZE_DESC = "Size of storefiles being served.";
String TOTAL_REQUEST_COUNT = "totalRequestCount";
String TOTAL_REQUEST_COUNT_DESC =
static final String REGION_COUNT = "regionCount";
static final String REGION_COUNT_DESC = "Number of regions";
static final String STORE_COUNT = "storeCount";
static final String STORE_COUNT_DESC = "Number of Stores";
static final String STOREFILE_COUNT = "storeFileCount";
static final String STOREFILE_COUNT_DESC = "Number of Store Files";
static final String MEMSTORE_SIZE = "memStoreSize";
static final String MEMSTORE_SIZE_DESC = "Size of the memstore";
static final String STOREFILE_SIZE = "storeFileSize";
static final String STOREFILE_SIZE_DESC = "Size of storefiles being served.";
static final String TOTAL_REQUEST_COUNT = "totalRequestCount";
static final String TOTAL_REQUEST_COUNT_DESC =
"Total number of requests this RegionServer has answered.";
String READ_REQUEST_COUNT = "readRequestCount";
String READ_REQUEST_COUNT_DESC =
static final String READ_REQUEST_COUNT = "readRequestCount";
static final String READ_REQUEST_COUNT_DESC =
"Number of read requests this region server has answered.";
String WRITE_REQUEST_COUNT = "writeRequestCount";
String WRITE_REQUEST_COUNT_DESC =
static final String WRITE_REQUEST_COUNT = "writeRequestCount";
static final String WRITE_REQUEST_COUNT_DESC =
"Number of mutation requests this region server has answered.";
String CHECK_MUTATE_FAILED_COUNT = "checkMutateFailedCount";
String CHECK_MUTATE_FAILED_COUNT_DESC =
static final String CHECK_MUTATE_FAILED_COUNT = "checkMutateFailedCount";
static final String CHECK_MUTATE_FAILED_COUNT_DESC =
"Number of Check and Mutate calls that failed the checks.";
String CHECK_MUTATE_PASSED_COUNT = "checkMutatePassedCount";
String CHECK_MUTATE_PASSED_COUNT_DESC =
static final String CHECK_MUTATE_PASSED_COUNT = "checkMutatePassedCount";
static final String CHECK_MUTATE_PASSED_COUNT_DESC =
"Number of Check and Mutate calls that passed the checks.";
String STOREFILE_INDEX_SIZE = "storeFileIndexSize";
String STOREFILE_INDEX_SIZE_DESC = "Size of indexes in storefiles on disk.";
String STATIC_INDEX_SIZE = "staticIndexSize";
String STATIC_INDEX_SIZE_DESC = "Uncompressed size of the static indexes.";
String STATIC_BLOOM_SIZE = "staticBloomSize";
String STATIC_BLOOM_SIZE_DESC =
static final String STOREFILE_INDEX_SIZE = "storeFileIndexSize";
static final String STOREFILE_INDEX_SIZE_DESC = "Size of indexes in storefiles on disk.";
static final String STATIC_INDEX_SIZE = "staticIndexSize";
static final String STATIC_INDEX_SIZE_DESC = "Uncompressed size of the static indexes.";
static final String STATIC_BLOOM_SIZE = "staticBloomSize";
static final String STATIC_BLOOM_SIZE_DESC =
"Uncompressed size of the static bloom filters.";
String NUMBER_OF_MUTATIONS_WITHOUT_WAL = "mutationsWithoutWALCount";
String NUMBER_OF_MUTATIONS_WITHOUT_WAL_DESC =
static final String NUMBER_OF_MUTATIONS_WITHOUT_WAL = "mutationsWithoutWALCount";
static final String NUMBER_OF_MUTATIONS_WITHOUT_WAL_DESC =
"Number of mutations that have been sent by clients with the write ahead logging turned off.";
String DATA_SIZE_WITHOUT_WAL = "mutationsWithoutWALSize";
String DATA_SIZE_WITHOUT_WAL_DESC =
static final String DATA_SIZE_WITHOUT_WAL = "mutationsWithoutWALSize";
static final String DATA_SIZE_WITHOUT_WAL_DESC =
"Size of data that has been sent by clients with the write ahead logging turned off.";
String PERCENT_FILES_LOCAL = "percentFilesLocal";
String PERCENT_FILES_LOCAL_DESC =
static final String PERCENT_FILES_LOCAL = "percentFilesLocal";
static final String PERCENT_FILES_LOCAL_DESC =
"The percent of HFiles that are stored on the local hdfs data node.";
String COMPACTION_QUEUE_LENGTH = "compactionQueueLength";
String COMPACTION_QUEUE_LENGTH_DESC = "Length of the queue for compactions.";
String FLUSH_QUEUE_LENGTH = "flushQueueLength";
String FLUSH_QUEUE_LENGTH_DESC = "Length of the queue for region flushes";
String BLOCK_CACHE_FREE_SIZE = "blockCacheFreeSize";
String BLOCK_CACHE_FREE_DESC =
static final String COMPACTION_QUEUE_LENGTH = "compactionQueueLength";
static final String COMPACTION_QUEUE_LENGTH_DESC = "Length of the queue for compactions.";
static final String FLUSH_QUEUE_LENGTH = "flushQueueLength";
static final String FLUSH_QUEUE_LENGTH_DESC = "Length of the queue for region flushes";
static final String BLOCK_CACHE_FREE_SIZE = "blockCacheFreeSize";
static final String BLOCK_CACHE_FREE_DESC =
"Size of the block cache that is not occupied.";
String BLOCK_CACHE_COUNT = "blockCacheCount";
String BLOCK_CACHE_COUNT_DESC = "Number of block in the block cache.";
String BLOCK_CACHE_SIZE = "blockCacheSize";
String BLOCK_CACHE_SIZE_DESC = "Size of the block cache.";
String BLOCK_CACHE_HIT_COUNT = "blockCacheHitCount";
String BLOCK_CACHE_HIT_COUNT_DESC = "Count of the hit on the block cache.";
String BLOCK_CACHE_MISS_COUNT = "blockCacheMissCount";
String BLOCK_COUNT_MISS_COUNT_DESC =
static final String BLOCK_CACHE_COUNT = "blockCacheCount";
static final String BLOCK_CACHE_COUNT_DESC = "Number of block in the block cache.";
static final String BLOCK_CACHE_SIZE = "blockCacheSize";
static final String BLOCK_CACHE_SIZE_DESC = "Size of the block cache.";
static final String BLOCK_CACHE_HIT_COUNT = "blockCacheHitCount";
static final String BLOCK_CACHE_HIT_COUNT_DESC = "Count of the hit on the block cache.";
static final String BLOCK_CACHE_MISS_COUNT = "blockCacheMissCount";
static final String BLOCK_COUNT_MISS_COUNT_DESC =
"Number of requests for a block that missed the block cache.";
String BLOCK_CACHE_EVICTION_COUNT = "blockCacheEvictionCount";
String BLOCK_CACHE_EVICTION_COUNT_DESC =
static final String BLOCK_CACHE_EVICTION_COUNT = "blockCacheEvictionCount";
static final String BLOCK_CACHE_EVICTION_COUNT_DESC =
"Count of the number of blocks evicted from the block cache.";
String BLOCK_CACHE_HIT_PERCENT = "blockCountHitPercent";
String BLOCK_CACHE_HIT_PERCENT_DESC =
static final String BLOCK_CACHE_HIT_PERCENT = "blockCountHitPercent";
static final String BLOCK_CACHE_HIT_PERCENT_DESC =
"Percent of block cache requests that are hits";
String BLOCK_CACHE_EXPRESS_HIT_PERCENT = "blockCacheExpressHitPercent";
String BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC =
static final String BLOCK_CACHE_EXPRESS_HIT_PERCENT = "blockCacheExpressHitPercent";
static final String BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC =
"The percent of the time that requests with the cache turned on hit the cache.";
String RS_START_TIME_NAME = "regionServerStartTime";
String ZOOKEEPER_QUORUM_NAME = "zookeeperQuorum";
String SERVER_NAME_NAME = "serverName";
String CLUSTER_ID_NAME = "clusterId";
String RS_START_TIME_DESC = "RegionServer Start Time";
String ZOOKEEPER_QUORUM_DESC = "Zookeeper Quorum";
String SERVER_NAME_DESC = "Server Name";
String CLUSTER_ID_DESC = "Cluster Id";
String UPDATES_BLOCKED_TIME = "updatesBlockedTime";
String UPDATES_BLOCKED_DESC =
static final String RS_START_TIME_NAME = "regionServerStartTime";
static final String ZOOKEEPER_QUORUM_NAME = "zookeeperQuorum";
static final String SERVER_NAME_NAME = "serverName";
static final String CLUSTER_ID_NAME = "clusterId";
static final String RS_START_TIME_DESC = "RegionServer Start Time";
static final String ZOOKEEPER_QUORUM_DESC = "Zookeeper Quorum";
static final String SERVER_NAME_DESC = "Server Name";
static final String CLUSTER_ID_DESC = "Cluster Id";
static final String UPDATES_BLOCKED_TIME = "updatesBlockedTime";
static final String UPDATES_BLOCKED_DESC =
"Number of MS updates have been blocked so that the memstore can be flushed.";
String DELETE_KEY = "delete";
String GET_KEY = "get";
String INCREMENT_KEY = "increment";
String MUTATE_KEY = "mutate";
String APPEND_KEY = "append";
String REPLAY_KEY = "replay";
String SCAN_NEXT_KEY = "scanNext";
String SLOW_MUTATE_KEY = "slowPutCount";
String SLOW_GET_KEY = "slowGetCount";
String SLOW_DELETE_KEY = "slowDeleteCount";
String SLOW_INCREMENT_KEY = "slowIncrementCount";
String SLOW_APPEND_KEY = "slowAppendCount";
String SLOW_MUTATE_DESC =
static final String DELETE_KEY = "delete";
static final String GET_KEY = "get";
static final String INCREMENT_KEY = "increment";
static final String MUTATE_KEY = "mutate";
static final String APPEND_KEY = "append";
static final String REPLAY_KEY = "replay";
static final String SCAN_NEXT_KEY = "scanNext";
static final String SLOW_MUTATE_KEY = "slowPutCount";
static final String SLOW_GET_KEY = "slowGetCount";
static final String SLOW_DELETE_KEY = "slowDeleteCount";
static final String SLOW_INCREMENT_KEY = "slowIncrementCount";
static final String SLOW_APPEND_KEY = "slowAppendCount";
static final String SLOW_MUTATE_DESC =
"The number of Multis that took over 1000ms to complete";
String SLOW_DELETE_DESC =
static final String SLOW_DELETE_DESC =
"The number of Deletes that took over 1000ms to complete";
String SLOW_GET_DESC = "The number of Gets that took over 1000ms to complete";
String SLOW_INCREMENT_DESC =
static final String SLOW_GET_DESC = "The number of Gets that took over 1000ms to complete";
static final String SLOW_INCREMENT_DESC =
"The number of Increments that took over 1000ms to complete";
String SLOW_APPEND_DESC =
static final String SLOW_APPEND_DESC =
"The number of Appends that took over 1000ms to complete";
@ -27,35 +27,35 @@ public interface MetricsRegionServerWrapper {
/**
* Get ServerName
*/
String getServerName();
public String getServerName();

/**
* Get the Cluster ID
*
* @return Cluster ID
*/
String getClusterId();
public String getClusterId();

/**
* Get the Zookeeper Quorum Info
*
* @return Zookeeper Quorum Info
*/
String getZookeeperQuorum();
public String getZookeeperQuorum();

/**
* Get the co-processors
*
* @return Co-processors
*/
String getCoprocessors();
public String getCoprocessors();

/**
* Get HRegionServer start time
*
* @return Start time of RegionServer in milliseconds
*/
long getStartCode();
public long getStartCode();

/**
* The number of online regions
@ -25,8 +25,8 @@ package org.apache.hadoop.hbase.regionserver;
*/
public interface MetricsRegionSource extends Comparable<MetricsRegionSource> {

String OPS_SAMPLE_NAME = "ops";
String SIZE_VALUE_NAME = "size";
public static final String OPS_SAMPLE_NAME = "ops";
public static final String SIZE_VALUE_NAME = "size";

/**
* Close the region's metrics as this region is closing.
@ -29,30 +29,30 @@ public interface MetricsEditsReplaySource extends BaseSource {
/**
* The name of the metrics
*/
String METRICS_NAME = "replay";
static final String METRICS_NAME = "replay";

/**
* The name of the metrics context that metrics will be under.
*/
String METRICS_CONTEXT = "regionserver";
static final String METRICS_CONTEXT = "regionserver";

/**
* Description
*/
String METRICS_DESCRIPTION = "Metrics about HBase RegionServer HLog Edits Replay";
static final String METRICS_DESCRIPTION = "Metrics about HBase RegionServer HLog Edits Replay";

/**
* The name of the metrics context that metrics will be under in jmx
*/
String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;
static final String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;


String REPLAY_TIME_NAME = "replayTime";
String REPLAY_TIME_DESC = "Time an replay operation took.";
String REPLAY_BATCH_SIZE_NAME = "replayBatchSize";
String REPLAY_BATCH_SIZE_DESC = "Number of changes in each replay batch.";
String REPLAY_DATA_SIZE_NAME = "replayDataSize";
String REPLAY_DATA_SIZE_DESC = "Size (in bytes) of the data of each replay.";
static final String REPLAY_TIME_NAME = "replayTime";
static final String REPLAY_TIME_DESC = "Time an replay operation took.";
static final String REPLAY_BATCH_SIZE_NAME = "replayBatchSize";
static final String REPLAY_BATCH_SIZE_DESC = "Number of changes in each replay batch.";
static final String REPLAY_DATA_SIZE_NAME = "replayDataSize";
static final String REPLAY_DATA_SIZE_DESC = "Size (in bytes) of the data of each replay.";

/**
* Add the time a replay command took
@ -29,34 +29,34 @@ public interface MetricsWALSource extends BaseSource {
/**
* The name of the metrics
*/
String METRICS_NAME = "WAL";
static final String METRICS_NAME = "WAL";

/**
* The name of the metrics context that metrics will be under.
*/
String METRICS_CONTEXT = "regionserver";
static final String METRICS_CONTEXT = "regionserver";

/**
* Description
*/
String METRICS_DESCRIPTION = "Metrics about HBase RegionServer HLog";
static final String METRICS_DESCRIPTION = "Metrics about HBase RegionServer HLog";

/**
* The name of the metrics context that metrics will be under in jmx
*/
String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;
static final String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;


String APPEND_TIME = "appendTime";
String APPEND_TIME_DESC = "Time an append to the log took.";
String APPEND_COUNT = "appendCount";
String APPEND_COUNT_DESC = "Number of appends to the write ahead log.";
String APPEND_SIZE = "appendSize";
String APPEND_SIZE_DESC = "Size (in bytes) of the data appended to the HLog.";
String SLOW_APPEND_COUNT = "slowAppendCount";
String SLOW_APPEND_COUNT_DESC = "Number of appends that were slow.";
String SYNC_TIME = "syncTime";
String SYNC_TIME_DESC = "The time it took to sync the HLog to HDFS.";
static final String APPEND_TIME = "appendTime";
static final String APPEND_TIME_DESC = "Time an append to the log took.";
static final String APPEND_COUNT = "appendCount";
static final String APPEND_COUNT_DESC = "Number of appends to the write ahead log.";
static final String APPEND_SIZE = "appendSize";
static final String APPEND_SIZE_DESC = "Size (in bytes) of the data appended to the HLog.";
static final String SLOW_APPEND_COUNT = "slowAppendCount";
static final String SLOW_APPEND_COUNT_DESC = "Number of appends that were slow.";
static final String SYNC_TIME = "syncTime";
static final String SYNC_TIME_DESC = "The time it took to sync the HLog to HDFS.";

/**
* Add the append size.
@ -28,21 +28,21 @@ public interface MetricsReplicationSource extends BaseSource {
/**
* The name of the metrics
*/
String METRICS_NAME = "Replication";
static final String METRICS_NAME = "Replication";

/**
* The name of the metrics context that metrics will be under.
*/
String METRICS_CONTEXT = "regionserver";
static final String METRICS_CONTEXT = "regionserver";

/**
* The name of the metrics context that metrics will be under.
*/
String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;
static final String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;

/**
* A description.
*/
String METRICS_DESCRIPTION = "Metrics about HBase replication";
static final String METRICS_DESCRIPTION = "Metrics about HBase replication";

}
@ -25,27 +25,27 @@ import org.apache.hadoop.hbase.metrics.BaseSource;
*/
public interface MetricsRESTSource extends BaseSource {

String METRICS_NAME = "REST";
public static String METRICS_NAME = "REST";

String CONTEXT = "rest";
public static String CONTEXT = "rest";

String JMX_CONTEXT = "REST";
public static String JMX_CONTEXT = "REST";

String METRICS_DESCRIPTION = "Metrics about the HBase REST server";
public static String METRICS_DESCRIPTION = "Metrics about the HBase REST server";

String REQUEST_KEY = "requests";
static String REQUEST_KEY = "requests";

String SUCCESSFUL_GET_KEY = "successfulGet";
static String SUCCESSFUL_GET_KEY = "successfulGet";

String SUCCESSFUL_PUT_KEY = "successfulPut";
static String SUCCESSFUL_PUT_KEY = "successfulPut";

String SUCCESSFUL_DELETE_KEY = "successfulDelete";
static String SUCCESSFUL_DELETE_KEY = "successfulDelete";

String FAILED_GET_KEY = "failedGet";
static String FAILED_GET_KEY = "failedGet";

String FAILED_PUT_KEY = "failedPut";
static String FAILED_PUT_KEY = "failedPut";

String FAILED_DELETE_KEY = "failedDelete";
static String FAILED_DELETE_KEY = "failedDelete";

/**
* Increment the number of requests
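Note that the MetricsRESTSource constants above are declared "public static String" with no "final"; in an interface the field is final anyway, so the old and new forms behave identically. A small sketch with hypothetical names, not part of this patch:

    // Hypothetical interface: even without an explicit "final", an interface
    // field is final and cannot be reassigned.
    interface ExampleRestMetrics {
      public static String METRICS_NAME = "REST";  // still implicitly final
    }

    class Renamer {
      void rename() {
        // The next line would be rejected by javac if uncommented:
        // ExampleRestMetrics.METRICS_NAME = "other";  // cannot assign to a final field
      }
    }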
@ -25,12 +25,12 @@ import org.apache.hadoop.hbase.metrics.BaseSource;
*/
public interface MetricsThriftServerSource extends BaseSource {

String BATCH_GET_KEY = "batchGet";
String BATCH_MUTATE_KEY = "batchMutate";
String TIME_IN_QUEUE_KEY = "timeInQueue";
String THRIFT_CALL_KEY = "thriftCall";
String SLOW_THRIFT_CALL_KEY = "slowThriftCall";
String CALL_QUEUE_LEN_KEY = "callQueueLen";
static final String BATCH_GET_KEY = "batchGet";
static final String BATCH_MUTATE_KEY = "batchMutate";
static final String TIME_IN_QUEUE_KEY = "timeInQueue";
static final String THRIFT_CALL_KEY = "thriftCall";
static final String SLOW_THRIFT_CALL_KEY = "slowThriftCall";
static final String CALL_QUEUE_LEN_KEY = "callQueueLen";

/**
* Add how long an operation was in the queue.
@ -21,12 +21,12 @@ package org.apache.hadoop.hbase.thrift;
/** Factory that will be used to create metrics sources for the two diffent types of thrift servers. */
public interface MetricsThriftServerSourceFactory {

String METRICS_NAME = "Thrift";
String METRICS_DESCRIPTION = "Thrift Server Metrics";
String THRIFT_ONE_METRICS_CONTEXT = "thrift-one";
String THRIFT_ONE_JMX_CONTEXT = "Thrift,sub=ThriftOne";
String THRIFT_TWO_METRICS_CONTEXT = "thrift-two";
String THRIFT_TWO_JMX_CONTEXT = "Thrift,sub=ThriftTwo";
static final String METRICS_NAME = "Thrift";
static final String METRICS_DESCRIPTION = "Thrift Server Metrics";
static final String THRIFT_ONE_METRICS_CONTEXT = "thrift-one";
static final String THRIFT_ONE_JMX_CONTEXT = "Thrift,sub=ThriftOne";
static final String THRIFT_TWO_METRICS_CONTEXT = "thrift-two";
static final String THRIFT_TWO_JMX_CONTEXT = "Thrift,sub=ThriftTwo";

/** Create a Source for a thrift one server */
MetricsThriftServerSource createThriftOneSource();
@ -25,14 +25,14 @@ package org.apache.hadoop.metrics2;
public interface MetricHistogram {

//Strings used to create metrics names.
String NUM_OPS_METRIC_NAME = "_num_ops";
String MIN_METRIC_NAME = "_min";
String MAX_METRIC_NAME = "_max";
String MEAN_METRIC_NAME = "_mean";
String MEDIAN_METRIC_NAME = "_median";
String SEVENTY_FIFTH_PERCENTILE_METRIC_NAME = "_75th_percentile";
String NINETY_FIFTH_PERCENTILE_METRIC_NAME = "_95th_percentile";
String NINETY_NINETH_PERCENTILE_METRIC_NAME = "_99th_percentile";
static final String NUM_OPS_METRIC_NAME = "_num_ops";
static final String MIN_METRIC_NAME = "_min";
static final String MAX_METRIC_NAME = "_max";
static final String MEAN_METRIC_NAME = "_mean";
static final String MEDIAN_METRIC_NAME = "_median";
static final String SEVENTY_FIFTH_PERCENTILE_METRIC_NAME = "_75th_percentile";
static final String NINETY_FIFTH_PERCENTILE_METRIC_NAME = "_95th_percentile";
static final String NINETY_NINETH_PERCENTILE_METRIC_NAME = "_99th_percentile";

/**
* Add a single value to a histogram's stream of values.
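The MetricHistogram suffixes above are appended to a base metric name when a histogram publishes its snapshot, so one histogram fans out into several metrics. A hedged sketch of how such suffixes combine; the class below is illustrative and not part of this patch:

    // Hypothetical use of the suffix constants: a histogram named "putTime"
    // would surface as putTime_num_ops, putTime_min, and so on.
    public class HistogramNames {
      static final String NUM_OPS_METRIC_NAME = "_num_ops";
      static final String MIN_METRIC_NAME = "_min";

      public static void main(String[] args) {
        String base = "putTime";
        System.out.println(base + NUM_OPS_METRIC_NAME);  // putTime_num_ops
        System.out.println(base + MIN_METRIC_NAME);      // putTime_min
      }
    }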
@ -32,6 +32,6 @@ public interface HadoopShims {
* TaskAttemptId.forName()
* @return a concrete TaskAttemptContext instance of o.a.h.mapreduce.TaskAttemptContext
*/
<T,J> T createTestTaskAttemptContext(final J job, final String taskId);
public <T,J> T createTestTaskAttemptContext(final J job, final String taskId);

}
@ -27,7 +27,7 @@ public interface MetricsAssertHelper {
* Init helper. This method will make sure that the metrics system is set
* up for tests.
*/
void init();
public void init();

/**
* Assert that a tag exists and has a given value.

@ -37,7 +37,7 @@ public interface MetricsAssertHelper {
* @param source The BaseSource{@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
void assertTag(String name, String expected, BaseSource source);
public void assertTag(String name, String expected, BaseSource source);

/**
* Assert that a gauge exists and that it's value is equal to the expected value.

@ -47,7 +47,7 @@ public interface MetricsAssertHelper {
* @param source The BaseSource{@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
void assertGauge(String name, long expected, BaseSource source);
public void assertGauge(String name, long expected, BaseSource source);

/**
* Assert that a gauge exists and it's value is greater than a given value

@ -57,7 +57,7 @@ public interface MetricsAssertHelper {
* @param source The BaseSource{@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
void assertGaugeGt(String name, long expected, BaseSource source);
public void assertGaugeGt(String name, long expected, BaseSource source);

/**
* Assert that a gauge exists and it's value is less than a given value

@ -67,7 +67,7 @@ public interface MetricsAssertHelper {
* @param source The BaseSource{@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
void assertGaugeLt(String name, long expected, BaseSource source);
public void assertGaugeLt(String name, long expected, BaseSource source);

/**
* Assert that a gauge exists and that it's value is equal to the expected value.

@ -77,7 +77,7 @@ public interface MetricsAssertHelper {
* @param source The BaseSource{@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
void assertGauge(String name, double expected, BaseSource source);
public void assertGauge(String name, double expected, BaseSource source);

/**
* Assert that a gauge exists and it's value is greater than a given value

@ -87,7 +87,7 @@ public interface MetricsAssertHelper {
* @param source The BaseSource{@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
void assertGaugeGt(String name, double expected, BaseSource source);
public void assertGaugeGt(String name, double expected, BaseSource source);

/**
* Assert that a gauge exists and it's value is less than a given value

@ -97,7 +97,7 @@ public interface MetricsAssertHelper {
* @param source The BaseSource{@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
void assertGaugeLt(String name, double expected, BaseSource source);
public void assertGaugeLt(String name, double expected, BaseSource source);

/**
* Assert that a counter exists and that it's value is equal to the expected value.

@ -107,7 +107,7 @@ public interface MetricsAssertHelper {
* @param source The BaseSource{@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
void assertCounter(String name, long expected, BaseSource source);
public void assertCounter(String name, long expected, BaseSource source);

/**
* Assert that a counter exists and that it's value is greater than the given value.

@ -117,7 +117,7 @@ public interface MetricsAssertHelper {
* @param source The BaseSource{@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
void assertCounterGt(String name, long expected, BaseSource source);
public void assertCounterGt(String name, long expected, BaseSource source);

/**
* Assert that a counter exists and that it's value is less than the given value.

@ -127,7 +127,7 @@ public interface MetricsAssertHelper {
* @param source The BaseSource{@link BaseSource} that will provide the tags,
* gauges, and counters.
*/
void assertCounterLt(String name, long expected, BaseSource source);
public void assertCounterLt(String name, long expected, BaseSource source);

/**
* Get the value of a counter.

@ -137,7 +137,7 @@ public interface MetricsAssertHelper {
* gauges, and counters.
* @return long value of the counter.
*/
long getCounter(String name, BaseSource source);
public long getCounter(String name, BaseSource source);

/**
* Get the value of a gauge as a double.

@ -147,7 +147,7 @@ public interface MetricsAssertHelper {
* gauges, and counters.
* @return double value of the gauge.
*/
double getGaugeDouble(String name, BaseSource source);
public double getGaugeDouble(String name, BaseSource source);

/**
* Get the value of a gauge as a long.

@ -157,5 +157,5 @@ public interface MetricsAssertHelper {
* gauges, and counters.
* @return long value of the gauge.
*/
long getGaugeLong(String name, BaseSource source);
public long getGaugeLong(String name, BaseSource source);
}
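All of the MetricsAssertHelper changes above follow one rule: methods declared in an interface are implicitly public and abstract, so the restored "public" keywords change nothing semantically. One consequence worth showing in a sketch (hypothetical types, not part of this patch) is that implementing classes must still write "public" explicitly:

    // Hypothetical example: the two declarations below are equivalent.
    interface ExampleAssertHelper {
      void init();                   // implicit public abstract
      public abstract void reset();  // explicit, redundant
    }

    class ExampleAssertHelperImpl implements ExampleAssertHelper {
      @Override
      public void init() { }   // "public" is required here: an interface method
      @Override                // cannot be implemented with weaker access.
      public void reset() { }
    }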
@ -31,7 +31,7 @@ public interface TestTokenizerData {
List<byte[]> getInputs();
List<byte[]> getOutputs();

class InMemory {
public static class InMemory {
public Collection<Object[]> getAllAsObjectArray() {
List<Object[]> all = Lists.newArrayList();
all.add(new Object[] { new TestTokenizerDataBasic() });

@ -32,7 +32,7 @@ public interface TestColumnData {
List<ByteRange> getInputs();
List<ByteRange> getOutputs();

class InMemory {
public static class InMemory {
public Collection<Object[]> getAllAsObjectArray() {
List<Object[]> all = Lists.newArrayList();
all.add(new Object[] { new TestColumnDataSimple() });

@ -54,7 +54,7 @@ public interface TestRowData {

void individualSearcherAssertions(CellSearcher searcher);

class InMemory {
public static class InMemory {

/*
* The following are different styles of data that the codec may encounter. Having these small

@ -33,7 +33,7 @@ public interface TestTimestampData {
long getMinimum();
List<Long> getOutputs();

class InMemory {
public static class InMemory {
public Collection<Object[]> getAllAsObjectArray() {
List<Object[]> all = Lists.newArrayList();
all.add(new Object[] { new TestTimestampDataBasic() });
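The InMemory changes in the four test-data interfaces above rely on the same rule extended to member types: a class declared inside an interface is implicitly public and static, so "class InMemory" and "public static class InMemory" declare the same type. A compilable sketch with hypothetical names, not part of this patch:

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.List;

    interface ExampleTestData {
      List<byte[]> getInputs();

      // Implicitly public static: instantiable as new ExampleTestData.InMemory()
      // without an enclosing instance.
      class InMemory {
        public Collection<Object[]> getAllAsObjectArray() {
          List<Object[]> all = new ArrayList<Object[]>();
          return all;
        }
      }
    }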
@ -36,7 +36,7 @@ public interface InterProcessLock {
* @throws InterruptedException If current thread is interrupted while
* waiting for the lock
*/
void acquire() throws IOException, InterruptedException;
public void acquire() throws IOException, InterruptedException;

/**
* Acquire the lock within a wait time.

@ -50,7 +50,7 @@ public interface InterProcessLock {
* @throws InterruptedException If the thread is interrupted while waiting to
* acquire the lock
*/
boolean tryAcquire(long timeoutMs)
public boolean tryAcquire(long timeoutMs)
throws IOException, InterruptedException;

/**

@ -59,7 +59,7 @@ public interface InterProcessLock {
* @throws InterruptedException If the thread is interrupted while releasing
* the lock
*/
void release() throws IOException, InterruptedException;
public void release() throws IOException, InterruptedException;

/**
* If supported, attempts to reap all the locks of this type by forcefully

@ -69,7 +69,7 @@ public interface InterProcessLock {
* lock holder is still alive.
* @throws IOException If there is an unrecoverable error reaping the locks
*/
void reapExpiredLocks(long expireTimeoutMs) throws IOException;
public void reapExpiredLocks(long expireTimeoutMs) throws IOException;

/**
* If supported, attempts to reap all the locks of this type by forcefully

@ -80,12 +80,12 @@ public interface InterProcessLock {
* with timeout=0.
* @throws IOException If there is an unrecoverable error reaping the locks
*/
void reapAllLocks() throws IOException;
public void reapAllLocks() throws IOException;

/**
* An interface for objects that process lock metadata.
*/
interface MetadataHandler {
public static interface MetadataHandler {

/**
* Called after lock metadata is successfully read from a distributed

@ -93,7 +93,7 @@ public interface InterProcessLock {
* printing the metadata in a humanly-readable format.
* @param metadata The metadata
*/
void handleMetadata(byte[] metadata);
public void handleMetadata(byte[] metadata);
}

/**

@ -101,5 +101,5 @@ public interface InterProcessLock {
* {@link MetadataHandler}.
* @throws InterruptedException If there is an unrecoverable error
*/
void visitLocks(MetadataHandler handler) throws IOException;
public void visitLocks(MetadataHandler handler) throws IOException;
}

@ -34,7 +34,7 @@ public interface InterProcessReadWriteLock {
* which the lock was acquired).
* @return An instantiated InterProcessLock instance
*/
InterProcessLock readLock(byte[] metadata);
public InterProcessLock readLock(byte[] metadata);

/**
* Obtain a write lock containing given metadata.

@ -43,5 +43,5 @@ public interface InterProcessReadWriteLock {
* which the lock was acquired).
* @return An instantiated InterProcessLock instance
*/
InterProcessLock writeLock(byte[] metadata);
public InterProcessLock writeLock(byte[] metadata);
}
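The MetadataHandler hunk above is the member-interface variant of the same cleanup: a member interface is implicitly static, and when it is nested in another interface it is implicitly public as well, so "interface MetadataHandler" needs no modifiers. A sketch with hypothetical names, not part of this patch:

    import java.io.IOException;

    interface ExampleLock {
      // Equivalent to: public static interface MetadataHandler
      interface MetadataHandler {
        void handleMetadata(byte[] metadata);
      }

      void visitLocks(MetadataHandler handler) throws IOException;
    }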
@ -35,7 +35,7 @@ public interface TableDescriptors {
* @return HTableDescriptor for tablename
* @throws IOException
*/
HTableDescriptor get(final String tablename)
public HTableDescriptor get(final String tablename)
throws IOException;

/**

@ -43,7 +43,7 @@ public interface TableDescriptors {
* @return HTableDescriptor for tablename
* @throws IOException
*/
HTableDescriptor get(final byte[] tablename)
public HTableDescriptor get(final byte[] tablename)
throws IOException;

/**

@ -52,7 +52,7 @@ public interface TableDescriptors {
* @return Map of all descriptors.
* @throws IOException
*/
Map<String, HTableDescriptor> getAll()
public Map<String, HTableDescriptor> getAll()
throws IOException;

/**

@ -60,7 +60,7 @@ public interface TableDescriptors {
* @param htd Descriptor to set into TableDescriptors
* @throws IOException
*/
void add(final HTableDescriptor htd)
public void add(final HTableDescriptor htd)
throws IOException;

/**

@ -68,6 +68,6 @@ public interface TableDescriptors {
* @return Instance of table descriptor or null if none found.
* @throws IOException
*/
HTableDescriptor remove(final String tablename)
public HTableDescriptor remove(final String tablename)
throws IOException;
}
@ -76,6 +76,6 @@ public interface Constraint extends Configurable {
* @throws org.apache.hadoop.hbase.exceptions.ConstraintException when the {@link Put} does not match the
* constraint.
*/
void check(Put p) throws ConstraintException;
public void check(Put p) throws ConstraintException;

}

@ -29,5 +29,5 @@ import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface CoprocessorService {
Service getService();
public Service getService();
}
@ -31,12 +31,12 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices;
@InterfaceStability.Evolving
public interface RegionCoprocessorEnvironment extends CoprocessorEnvironment {
/** @return the region associated with this coprocessor */
HRegion getRegion();
public HRegion getRegion();

/** @return reference to the region server services */
RegionServerServices getRegionServerServices();
public RegionServerServices getRegionServerServices();

/** @return shared data between all instances of this coprocessor */
ConcurrentMap<String, Object> getSharedData();
public ConcurrentMap<String, Object> getSharedData();

}

@ -28,5 +28,5 @@ import org.apache.hadoop.hbase.regionserver.wal.HLog;
@InterfaceStability.Evolving
public interface WALCoprocessorEnvironment extends CoprocessorEnvironment {
/** @return reference to the region server services */
HLog getWAL();
public HLog getWAL();
}
@ -36,5 +36,5 @@ public interface ForeignExceptionListener {
* Implementers must ensure that this method is thread-safe.
* @param e exception causing the error. Implementations must accept and handle null here.
*/
void receive(ForeignException e);
public void receive(ForeignException e);
}

@ -47,7 +47,7 @@ public interface ForeignExceptionSnare {
* @throws ForeignException
* all exceptions from remote sources are procedure exceptions
*/
void rethrowException() throws ForeignException;
public void rethrowException() throws ForeignException;

/**
* Non-exceptional form of {@link #rethrowException()}. Checks to see if any

@ -56,12 +56,12 @@ public interface ForeignExceptionSnare {
*
* @return <tt>true</tt> if there has been an error,<tt>false</tt> otherwise
*/
boolean hasException();
public boolean hasException();

/**
* Get the value of the captured exception.
*
* @return the captured foreign exception or null if no exception captured.
*/
ForeignException getException();
public ForeignException getException();
}
@ -87,12 +87,12 @@ public abstract class EventHandler implements Runnable, Comparable<Runnable> {
* Called before any event is processed
* @param event The event handler whose process method is about to be called.
*/
void beforeProcess(EventHandler event);
public void beforeProcess(EventHandler event);
/**
* Called after any event is processed
* @param event The event handler whose process method is about to be called.
*/
void afterProcess(EventHandler event);
public void afterProcess(EventHandler event);
}

/**
@ -296,7 +296,7 @@ public class HFileSystem extends FilterFileSystem {
/**
* Interface to implement to add a specific reordering logic in hdfs.
*/
interface ReorderBlocks {
static interface ReorderBlocks {
/**
*
* @param conf - the conf to use

@ -304,7 +304,7 @@ public class HFileSystem extends FilterFileSystem {
* @param src - the file name currently read
* @throws IOException - if something went wrong
*/
void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src) throws IOException;
public void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src) throws IOException;
}

/**
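ReorderBlocks above is nested in a class rather than an interface, which changes the rule slightly: the member interface is still implicitly static (so "static" is redundant), but it is not implicitly public, and dropping the keyword keeps its package-private access. A sketch with hypothetical names, not part of this patch:

    // Member interfaces of a class are implicitly static; their access
    // defaults to package-private unless a modifier is written.
    class ExampleFileSystem {
      interface ReorderCallback {  // same as: static interface ReorderCallback
        void reorder(String src);
      }
    }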
@ -34,5 +34,5 @@ public interface WritableWithSize {
*
* @return the size of the writable
*/
long getWritableSize();
public long getWritableSize();
}
@ -36,14 +36,14 @@ public interface BlockCache {
* @param buf The block contents wrapped in a ByteBuffer.
* @param inMemory Whether block should be treated as in-memory
*/
void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory);
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory);

/**
* Add block to cache (defaults to not in-memory).
* @param cacheKey The block's cache key.
* @param buf The object to cache.
*/
void cacheBlock(BlockCacheKey cacheKey, Cacheable buf);
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf);

/**
* Fetch block from cache.

@ -54,62 +54,62 @@ public interface BlockCache {
* @return Block or null if block is not in 2 cache.
* @see HFileReaderV2#readBlock(long, long, boolean, boolean, boolean, BlockType)
*/
Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat);
public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat);

/**
* Evict block from cache.
* @param cacheKey Block to evict
* @return true if block existed and was evicted, false if not
*/
boolean evictBlock(BlockCacheKey cacheKey);
public boolean evictBlock(BlockCacheKey cacheKey);

/**
* Evicts all blocks for the given HFile.
*
* @return the number of blocks evicted
*/
int evictBlocksByHfileName(String hfileName);
public int evictBlocksByHfileName(String hfileName);

/**
* Get the statistics for this block cache.
* @return Stats
*/
CacheStats getStats();
public CacheStats getStats();

/**
* Shutdown the cache.
*/
void shutdown();
public void shutdown();

/**
* Returns the total size of the block cache, in bytes.
* @return size of cache, in bytes
*/
long size();
public long size();

/**
* Returns the free size of the block cache, in bytes.
* @return free space in cache, in bytes
*/
long getFreeSize();
public long getFreeSize();

/**
* Returns the occupied size of the block cache, in bytes.
* @return occupied space in cache, in bytes
*/
long getCurrentSize();
public long getCurrentSize();

/**
* Returns the number of evictions that have occurred.
* @return number of evictions
*/
long getEvictedCount();
public long getEvictedCount();

/**
* Returns the number of blocks currently cached in the block cache.
* @return number of blocks in the cache
*/
long getBlockCount();
public long getBlockCount();

/**
* Performs a BlockCache summary and returns a List of BlockCacheColumnFamilySummary objects.

@ -123,5 +123,5 @@ public interface BlockCache {
* @return List of BlockCacheColumnFamilySummary
* @throws IOException exception
*/
List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries(Configuration conf) throws IOException;
public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries(Configuration conf) throws IOException;
}
@ -42,23 +42,23 @@ public interface Cacheable extends HeapSize {
* @return int length in bytes of the serialized form.
*/

int getSerializedLength();
public int getSerializedLength();

/**
* Serializes its data into destination.
*/
void serialize(ByteBuffer destination);
public void serialize(ByteBuffer destination);

/**
* Returns CacheableDeserializer instance which reconstructs original object from ByteBuffer.
*
* @return CacheableDeserialzer instance.
*/
CacheableDeserializer<Cacheable> getDeserializer();
public CacheableDeserializer<Cacheable> getDeserializer();

/**
* @return the block type of this cached HFile block
*/
BlockType getBlockType();
public BlockType getBlockType();

}
@ -33,7 +33,7 @@ public interface CacheableDeserializer<T extends Cacheable> {
*
* @return T the deserialized object.
*/
T deserialize(ByteBuffer b) throws IOException;
public T deserialize(ByteBuffer b) throws IOException;

/**
*

@ -43,12 +43,12 @@ public interface CacheableDeserializer<T extends Cacheable> {
* @return T the deserialized object.
* @throws IOException
*/
T deserialize(ByteBuffer b, boolean reuse) throws IOException;
public T deserialize(ByteBuffer b, boolean reuse) throws IOException;

/**
* Get the identifier of this deserialiser. Identifier is unique for each
* deserializer and generated by {@link CacheableDeserializerIdManager}
* @return identifier number of this cacheable deserializer
*/
int getDeserialiserIdentifier();
public int getDeserialiserIdentifier();
}
@ -49,9 +49,8 @@ public interface HFileDataBlockEncoder {
* generated).
* @return non null block which is coded according to the settings.
*/
HFileBlock diskToCacheFormat(
HFileBlock block, boolean isCompaction
);
public HFileBlock diskToCacheFormat(HFileBlock block,
boolean isCompaction);

/**
* Should be called before an encoded or unencoded data block is written to

@ -61,39 +60,37 @@ public interface HFileDataBlockEncoder {
* @param blockType block type
* @throws IOException
*/
void beforeWriteToDisk(
ByteBuffer in,
boolean includesMemstoreTS,
public void beforeWriteToDisk(
ByteBuffer in, boolean includesMemstoreTS,
HFileBlockEncodingContext encodingResult,
BlockType blockType
) throws IOException;
BlockType blockType) throws IOException;

/**
* Decides whether we should use a scanner over encoded blocks.
* @param isCompaction whether we are in a compaction.
* @return Whether to use encoded scanner.
*/
boolean useEncodedScanner(boolean isCompaction);
public boolean useEncodedScanner(boolean isCompaction);

/**
* Save metadata in HFile which will be written to disk
* @param writer writer for a given HFile
* @exception IOException on disk problems
*/
void saveMetadata(HFile.Writer writer)
public void saveMetadata(HFile.Writer writer)
throws IOException;

/** @return the on-disk data block encoding */
DataBlockEncoding getEncodingOnDisk();
public DataBlockEncoding getEncodingOnDisk();

/** @return the preferred in-cache data block encoding for normal reads */
DataBlockEncoding getEncodingInCache();
public DataBlockEncoding getEncodingInCache();

/**
* @return the effective in-cache data block encoding, taking into account
* whether we are doing a compaction.
*/
DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction);
public DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction);

/**
* Create an encoder specific encoding context object for writing. And the

@ -104,9 +101,8 @@ public interface HFileDataBlockEncoder {
* @param headerBytes header bytes
* @return a new {@link HFileBlockEncodingContext} object
*/
HFileBlockEncodingContext newOnDiskDataBlockEncodingContext(
Algorithm compressionAlgorithm, byte[] headerBytes
);
public HFileBlockEncodingContext newOnDiskDataBlockEncodingContext(
Algorithm compressionAlgorithm, byte[] headerBytes);

/**
* create a encoder specific decoding context for reading. And the

@ -116,8 +112,7 @@ public interface HFileDataBlockEncoder {
* @param compressionAlgorithm
* @return a new {@link HFileBlockDecodingContext} object
*/
HFileBlockDecodingContext newOnDiskDataBlockDecodingContext(
Algorithm compressionAlgorithm
);
public HFileBlockDecodingContext newOnDiskDataBlockDecodingContext(
Algorithm compressionAlgorithm);

}
@ -54,8 +54,8 @@ public interface HFileScanner {
* false when it is called.
* @throws IOException
*/
int seekTo(byte[] key) throws IOException;
int seekTo(byte[] key, int offset, int length) throws IOException;
public int seekTo(byte[] key) throws IOException;
public int seekTo(byte[] key, int offset, int length) throws IOException;
/**
* Reseek to or just before the passed <code>key</code>. Similar to seekTo
* except that this can be called even if the scanner is not at the beginning

@ -76,8 +76,8 @@ public interface HFileScanner {
* 1, such that k[i] < key, and scanner is left in position i.
* @throws IOException
*/
int reseekTo(byte[] key) throws IOException;
int reseekTo(byte[] key, int offset, int length) throws IOException;
public int reseekTo(byte[] key) throws IOException;
public int reseekTo(byte[] key, int offset, int length) throws IOException;
/**
* Consider the key stream of all the keys in the file,
* <code>k[0] .. k[n]</code>, where there are n keys in the file.

@ -88,28 +88,28 @@ public interface HFileScanner {
* return false (EOF).
* @throws IOException
*/
boolean seekBefore(byte[] key) throws IOException;
boolean seekBefore(byte[] key, int offset, int length) throws IOException;
public boolean seekBefore(byte [] key) throws IOException;
public boolean seekBefore(byte []key, int offset, int length) throws IOException;
/**
* Positions this scanner at the start of the file.
* @return False if empty file; i.e. a call to next would return false and
* the current key and value are undefined.
* @throws IOException
*/
boolean seekTo() throws IOException;
public boolean seekTo() throws IOException;
/**
* Scans to the next entry in the file.
* @return Returns false if you are at the end otherwise true if more in file.
* @throws IOException
*/
boolean next() throws IOException;
public boolean next() throws IOException;
/**
* Gets a buffer view to the current key. You must call
* {@link #seekTo(byte[])} before this method.
* @return byte buffer for the key. The limit is set to the key size, and the
* position is 0, the start of the buffer view.
*/
ByteBuffer getKey();
public ByteBuffer getKey();
/**
* Gets a buffer view to the current value. You must call
* {@link #seekTo(byte[])} before this method.

@ -117,31 +117,31 @@ public interface HFileScanner {
* @return byte buffer for the value. The limit is set to the value size, and
* the position is 0, the start of the buffer view.
*/
ByteBuffer getValue();
public ByteBuffer getValue();
/**
* @return Instance of {@link KeyValue}.
*/
KeyValue getKeyValue();
public KeyValue getKeyValue();
/**
* Convenience method to get a copy of the key as a string - interpreting the
* bytes as UTF8. You must call {@link #seekTo(byte[])} before this method.
* @return key as a string
*/
String getKeyString();
public String getKeyString();
/**
* Convenience method to get a copy of the value as a string - interpreting
* the bytes as UTF8. You must call {@link #seekTo(byte[])} before this method.
* @return value as a string
*/
String getValueString();
public String getValueString();
/**
* @return Reader that underlies this Scanner instance.
*/
HFile.Reader getReader();
public HFile.Reader getReader();
/**
* @return True is scanner has had one of the seek calls invoked; i.e.
* {@link #seekBefore(byte[])} or {@link #seekTo()} or {@link #seekTo(byte[])}.
* Otherwise returns false.
*/
boolean isSeeked();
public boolean isSeeked();
}
@@ -35,17 +35,17 @@ public interface Delayable {
* should be set when ending the delay or right away. There are cases when
* the return value can be set right away, even if the call is delayed.
*/
void startDelay(boolean delayReturnValue);
public void startDelay(boolean delayReturnValue);

/**
* @return is the call delayed?
*/
boolean isDelayed();
public boolean isDelayed();

/**
* @return is the return value delayed?
*/
boolean isReturnValueDelayed();
public boolean isReturnValueDelayed();

/**
* Signal that the RPC server is now allowed to send the response.

@@ -54,14 +54,14 @@ public interface Delayable {
* not be delayed, this parameter must be null.
* @throws IOException
*/
void endDelay(Object result) throws IOException;
public void endDelay(Object result) throws IOException;

/**
* Signal the end of a delayed RPC, without specifying the return value. Use
* this only if the return value was not delayed.
* @throws IOException
*/
void endDelay() throws IOException;
public void endDelay() throws IOException;

/**
* End the call, throwing an exception to the caller. This works regardless

@@ -69,5 +69,5 @@ public interface Delayable {
* @param t Object to throw to the client.
* @throws IOException
*/
void endDelayThrowing(Throwable t) throws IOException;
public void endDelayThrowing(Throwable t) throws IOException;
}

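A hedged sketch of the intended use of Delayable, not taken from the patch: an RPC handler parks the response, completes the work on another thread, and releases it through endDelay() or endDelayThrowing(). The doSlowWork() helper is hypothetical.

import java.io.IOException;
import org.apache.hadoop.hbase.ipc.Delayable;

public class DelayedReplyExample {
  public static Object handle(final Delayable call) throws IOException {
    call.startDelay(true); // true: the return value itself is delayed
    new Thread(new Runnable() {
      @Override
      public void run() {
        try {
          Object result = doSlowWork();   // hypothetical slow computation
          call.endDelay(result);          // releases the parked response
        } catch (Exception e) {
          try {
            call.endDelayThrowing(e);     // propagate the failure to the client
          } catch (IOException ignored) {
          }
        }
      }
    }).start();
    return null; // ignored: the real value arrives via endDelay()
  }

  private static Object doSlowWork() { return "done"; }
}
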
@@ -31,5 +31,5 @@ public interface HBaseRPCErrorHandler {
* @param e the throwable
* @return if the server should be shut down
*/
boolean checkOOME(final Throwable e) ;
public boolean checkOOME(final Throwable e) ;
}

@@ -65,7 +65,7 @@ public interface RpcServerInterface {
*/
MetricsHBaseServer getMetrics();

void setQosFunction(Function<Pair<RequestHeader, Message>, Integer> newFunc);
public void setQosFunction(Function<Pair<RequestHeader, Message>, Integer> newFunc);

/**
* Refresh authentication manager policy.

@@ -1145,11 +1145,11 @@ public class AssignmentManager extends ZooKeeperListener {
/**
* A specific runnable that works only on a region.
*/
private interface RegionRunnable extends Runnable{
private static interface RegionRunnable extends Runnable{
/**
* @return - the name of the region it works on.
*/
String getRegionName();
public String getRegionName();
}

/**

@@ -220,14 +220,14 @@ public class ClusterStatusPublisher extends Chore {
}


public interface Publisher extends Closeable {
public static interface Publisher extends Closeable {

void connect(Configuration conf) throws IOException;
public void connect(Configuration conf) throws IOException;

void publish(ClusterStatus cs);
public void publish(ClusterStatus cs);

@Override
void close();
public void close();
}

public static class MulticastPublisher implements Publisher {

@@ -50,21 +50,21 @@ public interface LoadBalancer extends Configurable {
* Set the current cluster status. This allows a LoadBalancer to map host name to a server
* @param st
*/
void setClusterStatus(ClusterStatus st);
public void setClusterStatus(ClusterStatus st);


/**
* Set the master service.
* @param masterServices
*/
void setMasterServices(MasterServices masterServices);
public void setMasterServices(MasterServices masterServices);

/**
* Perform the major balance operation
* @param clusterState
* @return List of plans
*/
List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState);
public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState);

/**
* Perform a Round Robin assignment of regions.

@@ -72,10 +72,7 @@ public interface LoadBalancer extends Configurable {
* @param servers
* @return Map of servername to regioninfos
*/
Map<ServerName, List<HRegionInfo>> roundRobinAssignment(
List<HRegionInfo> regions,
List<ServerName> servers
);
public Map<ServerName, List<HRegionInfo>> roundRobinAssignment(List<HRegionInfo> regions, List<ServerName> servers);

/**
* Assign regions to the previously hosting region server

@@ -83,10 +80,7 @@ public interface LoadBalancer extends Configurable {
* @param servers
* @return List of plans
*/
Map<ServerName, List<HRegionInfo>> retainAssignment(
Map<HRegionInfo, ServerName> regions,
List<ServerName> servers
);
public Map<ServerName, List<HRegionInfo>> retainAssignment(Map<HRegionInfo, ServerName> regions, List<ServerName> servers);

/**
* Sync assign a region

@@ -94,10 +88,7 @@ public interface LoadBalancer extends Configurable {
* @param servers
* @return Map regioninfos to servernames
*/
Map<HRegionInfo, ServerName> immediateAssignment(
List<HRegionInfo> regions,
List<ServerName> servers
);
public Map<HRegionInfo, ServerName> immediateAssignment(List<HRegionInfo> regions, List<ServerName> servers);

/**
* Get a random region server from the list

@@ -105,7 +96,6 @@ public interface LoadBalancer extends Configurable {
* @param servers
* @return Servername
*/
ServerName randomAssignment(
HRegionInfo regionInfo, List<ServerName> servers
);
public ServerName randomAssignment(HRegionInfo regionInfo,
List<ServerName> servers);
}

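As a usage sketch only (the balancer instance and the region/server lists are assumed to come from the master's context), a startup placement might be requested like this:

import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.LoadBalancer;

public class AssignmentSketch {
  /** Bulk (round robin) placement for regions that have no previous location. */
  static Map<ServerName, List<HRegionInfo>> planStartup(
      LoadBalancer balancer, List<HRegionInfo> regions, List<ServerName> servers) {
    return balancer.roundRobinAssignment(regions, servers);
  }
}
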
@@ -40,32 +40,32 @@ public interface MasterServices extends Server {
/**
* @return Master's instance of the {@link AssignmentManager}
*/
AssignmentManager getAssignmentManager();
public AssignmentManager getAssignmentManager();

/**
* @return Master's filesystem {@link MasterFileSystem} utility class.
*/
MasterFileSystem getMasterFileSystem();
public MasterFileSystem getMasterFileSystem();

/**
* @return Master's {@link ServerManager} instance.
*/
ServerManager getServerManager();
public ServerManager getServerManager();

/**
* @return Master's instance of {@link ExecutorService}
*/
ExecutorService getExecutorService();
public ExecutorService getExecutorService();

/**
* @return Master's instance of {@link TableLockManager}
*/
TableLockManager getTableLockManager();
public TableLockManager getTableLockManager();

/**
* @return Master's instance of {@link MasterCoprocessorHost}
*/
MasterCoprocessorHost getCoprocessorHost();
public MasterCoprocessorHost getCoprocessorHost();

/**
* Check table is modifiable; i.e. exists and is offline.

@@ -75,7 +75,7 @@ public interface MasterServices extends Server {
* @throws IOException
*/
// We actually throw the exceptions mentioned in the
void checkTableModifiable(final byte[] tableName)
public void checkTableModifiable(final byte [] tableName)
throws IOException, TableNotFoundException, TableNotDisabledException;

/**

@@ -84,7 +84,7 @@ public interface MasterServices extends Server {
* @param splitKeys Starting row keys for the initial table regions. If null
* a single region is created.
*/
void createTable(HTableDescriptor desc, byte[][] splitKeys)
public void createTable(HTableDescriptor desc, byte [][] splitKeys)
throws IOException;

/**

@@ -92,7 +92,7 @@ public interface MasterServices extends Server {
* @param tableName The table name
* @throws IOException
*/
void deleteTable(final byte[] tableName) throws IOException;
public void deleteTable(final byte[] tableName) throws IOException;

/**
* Modify the descriptor of an existing table

@@ -100,7 +100,7 @@ public interface MasterServices extends Server {
* @param descriptor The updated table descriptor
* @throws IOException
*/
void modifyTable(final byte[] tableName, final HTableDescriptor descriptor)
public void modifyTable(final byte[] tableName, final HTableDescriptor descriptor)
throws IOException;

/**

@@ -108,14 +108,14 @@ public interface MasterServices extends Server {
* @param tableName The table name
* @throws IOException
*/
void enableTable(final byte[] tableName) throws IOException;
public void enableTable(final byte[] tableName) throws IOException;

/**
* Disable an existing table
* @param tableName The table name
* @throws IOException
*/
void disableTable(final byte[] tableName) throws IOException;
public void disableTable(final byte[] tableName) throws IOException;

/**
* Add a new column to an existing table

@@ -123,7 +123,7 @@ public interface MasterServices extends Server {
* @param column The column definition
* @throws IOException
*/
void addColumn(final byte[] tableName, final HColumnDescriptor column)
public void addColumn(final byte[] tableName, final HColumnDescriptor column)
throws IOException;

/**

@@ -132,7 +132,7 @@ public interface MasterServices extends Server {
* @param descriptor The updated column definition
* @throws IOException
*/
void modifyColumn(byte[] tableName, HColumnDescriptor descriptor)
public void modifyColumn(byte[] tableName, HColumnDescriptor descriptor)
throws IOException;

/**

@@ -141,18 +141,18 @@ public interface MasterServices extends Server {
* @param columnName The column name
* @throws IOException
*/
void deleteColumn(final byte[] tableName, final byte[] columnName)
public void deleteColumn(final byte[] tableName, final byte[] columnName)
throws IOException;

/**
* @return Return table descriptors implementation.
*/
TableDescriptors getTableDescriptors();
public TableDescriptors getTableDescriptors();

/**
* @return true if master enables ServerShutdownHandler;
*/
boolean isServerShutdownHandlerEnabled();
public boolean isServerShutdownHandlerEnabled();

/**
* Registers a new protocol buffer {@link Service} subclass as a master coprocessor endpoint.

@@ -167,7 +167,7 @@ public interface MasterServices extends Server {
* @return {@code true} if the registration was successful, {@code false}
* otherwise
*/
boolean registerService(Service instance);
public boolean registerService(Service instance);

/**
* Merge two regions. The real implementation is on the regionserver, master

@@ -178,13 +178,12 @@ public interface MasterServices extends Server {
* two adjacent regions
* @throws IOException
*/
void dispatchMergingRegions(
final HRegionInfo region_a, final HRegionInfo region_b, final boolean forcible
) throws IOException;
public void dispatchMergingRegions(final HRegionInfo region_a,
final HRegionInfo region_b, final boolean forcible) throws IOException;

/**
* @return true if master is initialized
*/
boolean isInitialized();
public boolean isInitialized();

}

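An illustrative, hedged sketch of driving the table lifecycle through MasterServices; the table name and the single column family are invented for the example, and the era's byte[]-based descriptors are assumed:

import java.io.IOException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.util.Bytes;

public class TableAdminSketch {
  /** Drops and recreates a table with one column family. */
  static void recreate(MasterServices master, String name) throws IOException {
    byte[] table = Bytes.toBytes(name);
    master.disableTable(table);          // table must be offline before delete
    master.deleteTable(table);
    HTableDescriptor desc = new HTableDescriptor(table);
    desc.addFamily(new HColumnDescriptor("f"));
    master.createTable(desc, null);      // null split keys: single initial region
  }
}
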
@@ -34,30 +34,30 @@ public interface SnapshotSentinel {
* @return <tt>false</tt> if the snapshot is still in progress, <tt>true</tt> if the snapshot has
* finished
*/
boolean isFinished();
public boolean isFinished();

/**
* @return -1 if the snapshot is in progress, otherwise the completion timestamp.
*/
long getCompletionTimestamp();
public long getCompletionTimestamp();

/**
* Actively cancel a running snapshot.
* @param why Reason for cancellation.
*/
void cancel(String why);
public void cancel(String why);

/**
* @return the description of the snapshot being run
*/
SnapshotDescription getSnapshot();
public SnapshotDescription getSnapshot();

/**
* Get the exception that caused the snapshot to fail, if the snapshot has failed.
* @return {@link ForeignException} that caused the snapshot to fail, or <tt>null</tt> if the
* snapshot is still in progress or has succeeded
*/
ForeignException getExceptionIfFailed();
public ForeignException getExceptionIfFailed();

/**
* Rethrow the exception returned by {@link SnapshotSentinel#getExceptionIfFailed}.

@@ -65,5 +65,5 @@ public interface SnapshotSentinel {
*
* @throws ForeignException all exceptions from remote sources are procedure exceptions
*/
void rethrowExceptionIfFailed() throws ForeignException;
public void rethrowExceptionIfFailed() throws ForeignException;
}

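A small sketch of the completion contract above: poll isFinished(), then surface any remote failure. The 100 ms interval is arbitrary.

import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.master.SnapshotSentinel;

public class SnapshotWaiter {
  static void await(SnapshotSentinel sentinel) throws ForeignException, InterruptedException {
    while (!sentinel.isFinished()) {
      Thread.sleep(100);
    }
    sentinel.rethrowExceptionIfFailed(); // no-op if the snapshot succeeded
  }
}
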
@@ -1593,11 +1593,11 @@ public class SplitLogManager extends ZooKeeperListener {
* a serialization point at the end of the task processing. Must be
* restartable and idempotent.
*/
public interface TaskFinisher {
static public interface TaskFinisher {
/**
* Status that can be returned by finish()
*/
enum Status {
static public enum Status {
/**
* task completed successfully
*/

@@ -1616,7 +1616,7 @@ public class SplitLogManager extends ZooKeeperListener {
* @param taskname
* @return DONE if task completed successfully, ERR otherwise
*/
Status finish(ServerName workerName, String taskname);
public Status finish(ServerName workerName, String taskname);
}

enum ResubmitDirective {

@@ -82,20 +82,20 @@ public abstract class TableLockManager {
* A distributed lock for a table.
*/
@InterfaceAudience.Private
public interface TableLock {
public static interface TableLock {
/**
* Acquire the lock, with the configured lock timeout.
* @throws LockTimeoutException If unable to acquire a lock within a specified
* time period (if any)
* @throws IOException If unrecoverable error occurs
*/
void acquire() throws IOException;
public void acquire() throws IOException;

/**
* Release the lock already held.
* @throws IOException If there is an unrecoverable error releasing the lock
*/
void release() throws IOException;
public void release() throws IOException;
}

/**

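The acquire/release discipline implied by TableLock is the usual try/finally pattern; a sketch, assuming the lock instance came from a TableLockManager:

import java.io.IOException;
import org.apache.hadoop.hbase.master.TableLockManager.TableLock;

public class LockedOperation {
  static void runExclusively(TableLock lock, Runnable body) throws IOException {
    lock.acquire();         // may throw LockTimeoutException per the contract above
    try {
      body.run();
    } finally {
      lock.release();       // always release the held lock
    }
  }
}
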
@@ -35,5 +35,5 @@ public interface FileCleanerDelegate extends Configurable, Stoppable {
* @param fStat file status of the file to check
* @return <tt>true</tt> if the file is deletable, <tt>false</tt> if not
*/
boolean isFileDeletable(FileStatus fStat);
public boolean isFileDeletable(FileStatus fStat);
}

@@ -33,5 +33,5 @@ public interface TotesHRegionInfo {
/**
* @return HRegionInfo instance.
*/
HRegionInfo getHRegionInfo();
public HRegionInfo getHRegionInfo();
}

@@ -29,16 +29,16 @@ import com.google.protobuf.Message;
*/
@InterfaceAudience.Private
public interface MonitoredRPCHandler extends MonitoredTask {
String getRPC();
String getRPC(boolean withParams);
long getRPCPacketLength();
String getClient();
long getRPCStartTime();
long getRPCQueueTime();
boolean isRPCRunning();
boolean isOperationRunning();
public abstract String getRPC();
public abstract String getRPC(boolean withParams);
public abstract long getRPCPacketLength();
public abstract String getClient();
public abstract long getRPCStartTime();
public abstract long getRPCQueueTime();
public abstract boolean isRPCRunning();
public abstract boolean isOperationRunning();

void setRPC(String methodName, Object[] params, long queueTime);
void setRPCPacket(Message param);
void setConnection(String clientAddress, int remotePort);
public abstract void setRPC(String methodName, Object [] params, long queueTime);
public abstract void setRPCPacket(Message param);
public abstract void setConnection(String clientAddress, int remotePort);
}

@@ -32,47 +32,47 @@ public interface MonitoredTask extends Cloneable {
ABORTED;
}

long getStartTime();
String getDescription();
String getStatus();
long getStatusTime();
State getState();
long getStateTime();
long getCompletionTimestamp();
public abstract long getStartTime();
public abstract String getDescription();
public abstract String getStatus();
public abstract long getStatusTime();
public abstract State getState();
public abstract long getStateTime();
public abstract long getCompletionTimestamp();

void markComplete(String msg);
void pause(String msg);
void resume(String msg);
void abort(String msg);
void expireNow();
public abstract void markComplete(String msg);
public abstract void pause(String msg);
public abstract void resume(String msg);
public abstract void abort(String msg);
public abstract void expireNow();

void setStatus(String status);
void setDescription(String description);
public abstract void setStatus(String status);
public abstract void setDescription(String description);

/**
* Explicitly mark this status as able to be cleaned up,
* even though it might not be complete.
*/
void cleanup();
public abstract void cleanup();

/**
* Public exposure of Object.clone() in order to allow clients to easily
* capture current state.
* @return a copy of the object whose references will not change
*/
MonitoredTask clone();
public abstract MonitoredTask clone();

/**
* Creates a string map of internal details for extensible exposure of
* monitored tasks.
* @return A Map containing information for this task.
*/
Map<String, Object> toMap() throws IOException;
public abstract Map<String, Object> toMap() throws IOException;

/**
* Creates a JSON object for parseable exposure of monitored tasks.
* @return An encoded JSON object containing information for this task.
*/
String toJSON() throws IOException;
public abstract String toJSON() throws IOException;

}

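A sketch of how a long-running operation might report through MonitoredTask. Obtaining the instance via TaskMonitor.get().createStatus(...) follows common HBase usage; the phase strings are invented.

import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;

public class ProgressDemo {
  public static void run() {
    MonitoredTask status = TaskMonitor.get().createStatus("Rebuilding index");
    try {
      status.setStatus("Phase 1 of 2");
      // ... work ...
      status.setStatus("Phase 2 of 2");
      // ... work ...
      status.markComplete("Index rebuilt");
    } catch (RuntimeException e) {
      status.abort("Rebuild failed: " + e.getMessage());
      throw e;
    }
  }
}
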
@@ -39,7 +39,7 @@ public interface ProcedureCoordinatorRpcs extends Closeable {
* @param listener
* @return true if succeed, false if encountered initialization errors.
*/
boolean start(final ProcedureCoordinator listener);
public boolean start(final ProcedureCoordinator listener);

/**
* Notify the members that the coordinator has aborted the procedure and that it should release

@@ -50,7 +50,7 @@ public interface ProcedureCoordinatorRpcs extends Closeable {
* @throws IOException if the rpcs can't reach the other members of the procedure (and can't
* recover).
*/
void sendAbortToMembers(Procedure procName, ForeignException cause) throws IOException;
public void sendAbortToMembers(Procedure procName, ForeignException cause) throws IOException;

/**
* Notify the members to acquire barrier for the procedure

@@ -61,7 +61,7 @@ public interface ProcedureCoordinatorRpcs extends Closeable {
* @throws IllegalArgumentException if the procedure was already marked as failed
* @throws IOException if we can't reach the remote notification mechanism
*/
void sendGlobalBarrierAcquire(Procedure procName, byte[] info, List<String> members)
public void sendGlobalBarrierAcquire(Procedure procName, byte[] info, List<String> members)
throws IOException, IllegalArgumentException;

/**

@@ -74,12 +74,12 @@ public interface ProcedureCoordinatorRpcs extends Closeable {
* @param members members to tell we have reached in-barrier phase
* @throws IOException if we can't reach the remote notification mechanism
*/
void sendGlobalBarrierReached(Procedure procName, List<String> members) throws IOException;
public void sendGlobalBarrierReached(Procedure procName, List<String> members) throws IOException;

/**
* Notify Members to reset the distributed state for procedure
* @param procName name of the procedure to reset
* @throws IOException if the remote notification mechanism cannot be reached
*/
void resetMembers(Procedure procName) throws IOException;
public void resetMembers(Procedure procName) throws IOException;
}

@@ -35,13 +35,13 @@ public interface ProcedureMemberRpcs extends Closeable {
/**
* Initialize and start any threads or connections the member needs.
*/
void start(final String memberName, final ProcedureMember member);
public void start(final String memberName, final ProcedureMember member);

/**
* Each subprocedure is being executed on a member. This is the identifier for the member.
* @return the member name
*/
String getMemberName();
public String getMemberName();

/**
* Notify the coordinator that we aborted the specified {@link Subprocedure}

@@ -51,7 +51,7 @@ public interface ProcedureMemberRpcs extends Closeable {
* @throws IOException thrown when the rpcs can't reach the other members of the procedure (and
* thus can't recover).
*/
void sendMemberAborted(Subprocedure sub, ForeignException cause) throws IOException;
public void sendMemberAborted(Subprocedure sub, ForeignException cause) throws IOException;

/**
* Notify the coordinator that the specified {@link Subprocedure} has acquired the locally required

@@ -60,7 +60,7 @@ public interface ProcedureMemberRpcs extends Closeable {
* @param sub the specified {@link Subprocedure}
* @throws IOException if we can't reach the coordinator
*/
void sendMemberAcquired(Subprocedure sub) throws IOException;
public void sendMemberAcquired(Subprocedure sub) throws IOException;

/**
* Notify the coordinator that the specified {@link Subprocedure} has completed the work that

@@ -69,5 +69,5 @@ public interface ProcedureMemberRpcs extends Closeable {
* @param sub the specified {@link Subprocedure}
* @throws IOException if we can't reach the coordinator
*/
void sendMemberCompleted(Subprocedure sub) throws IOException;
public void sendMemberCompleted(Subprocedure sub) throws IOException;
}

@@ -36,5 +36,5 @@ public interface SubprocedureFactory {
* request
* @throws IllegalStateException if the current runner cannot accept any more new requests
*/
Subprocedure buildSubprocedure(String procName, byte[] procArgs);
public Subprocedure buildSubprocedure(String procName, byte[] procArgs);
}

@@ -56,26 +56,25 @@ public interface ColumnTracker {
* @throws IOException in case there is an internal consistency problem
* caused by a data corruption.
*/
ScanQueryMatcher.MatchCode checkColumn(
byte[] bytes, int offset, int length, long ttl, byte type, boolean ignoreCount
)
public ScanQueryMatcher.MatchCode checkColumn(byte[] bytes, int offset,
int length, long ttl, byte type, boolean ignoreCount)
throws IOException;

/**
* Updates internal variables in between files
*/
void update();
public void update();

/**
* Resets the Matcher
*/
void reset();
public void reset();

/**
*
* @return <code>true</code> when done.
*/
boolean done();
public boolean done();

/**
* Used by matcher and scan/get to get a hint of the next column

@@ -88,14 +87,13 @@ public interface ColumnTracker {
*
* @return null, or a ColumnCount that we should seek to
*/
ColumnCount getColumnHint();
public ColumnCount getColumnHint();

/**
* Retrieve the MatchCode for the next row or column
*/
MatchCode getNextRowOrNextColumn(
byte[] bytes, int offset, int qualLength
);
public MatchCode getNextRowOrNextColumn(byte[] bytes, int offset,
int qualLength);

/**
* Give the tracker a chance to declare it's done based on only the timestamp

@@ -104,5 +102,5 @@ public interface ColumnTracker {
* @param timestamp
* @return <code>true</code> to early out based on timestamp.
*/
boolean isDone(long timestamp);
public boolean isDone(long timestamp);
}

@@ -34,7 +34,7 @@ public interface CompactionRequestor {
* compactions were started
* @throws IOException
*/
List<CompactionRequest> requestCompaction(final HRegion r, final String why)
public List<CompactionRequest> requestCompaction(final HRegion r, final String why)
throws IOException;

/**

@@ -47,9 +47,8 @@ public interface CompactionRequestor {
* compactions were started
* @throws IOException
*/
List<CompactionRequest> requestCompaction(
final HRegion r, final String why, List<Pair<CompactionRequest, Store>> requests
)
public List<CompactionRequest> requestCompaction(final HRegion r, final String why,
List<Pair<CompactionRequest, Store>> requests)
throws IOException;

/**

@@ -61,9 +60,8 @@ public interface CompactionRequestor {
* @return The created {@link CompactionRequest} or <tt>null</tt> if no compaction was started.
* @throws IOException
*/
CompactionRequest requestCompaction(
final HRegion r, final Store s, final String why, CompactionRequest request
) throws IOException;
public CompactionRequest requestCompaction(final HRegion r, final Store s, final String why,
CompactionRequest request) throws IOException;

/**
* @param r Region to compact

@@ -76,9 +74,8 @@ public interface CompactionRequestor {
* compactions were started.
* @throws IOException
*/
List<CompactionRequest> requestCompaction(
final HRegion r, final String why, int pri, List<Pair<CompactionRequest, Store>> requests
) throws IOException;
public List<CompactionRequest> requestCompaction(final HRegion r, final String why, int pri,
List<Pair<CompactionRequest, Store>> requests) throws IOException;

/**
* @param r Region to compact

@@ -90,7 +87,6 @@ public interface CompactionRequestor {
* @return The created {@link CompactionRequest} or <tt>null</tt> if no compaction was started
* @throws IOException
*/
CompactionRequest requestCompaction(
final HRegion r, final Store s, final String why, int pri, CompactionRequest request
) throws IOException;
public CompactionRequest requestCompaction(final HRegion r, final Store s, final String why,
int pri, CompactionRequest request) throws IOException;
}

@@ -43,9 +43,8 @@ public interface DeleteTracker {
* @param timestamp timestamp
* @param type delete type as byte
*/
void add(
byte[] buffer, int qualifierOffset, int qualifierLength, long timestamp, byte type
);
public void add(byte [] buffer, int qualifierOffset, int qualifierLength,
long timestamp, byte type);

/**
* Check if the specified KeyValue buffer has been deleted by a previously

@@ -56,14 +55,13 @@ public interface DeleteTracker {
* @param timestamp timestamp
* @return deleteResult The result tells whether the KeyValue is deleted and why
*/
DeleteResult isDeleted(
byte[] buffer, int qualifierOffset, int qualifierLength, long timestamp
);
public DeleteResult isDeleted(byte [] buffer, int qualifierOffset,
int qualifierLength, long timestamp);

/**
* @return true if there are no current deletes, false otherwise
*/
boolean isEmpty();
public boolean isEmpty();

/**
* Called at the end of every StoreFile.

@@ -71,14 +69,14 @@ public interface DeleteTracker {
* Many optimized implementations of Trackers will require an update
* when the end of each StoreFile is reached.
*/
void update();
public void update();

/**
* Called between rows.
* <p>
* This clears everything as if a new DeleteTracker was instantiated.
*/
void reset();
public void reset();


/**

@@ -104,7 +102,7 @@ public interface DeleteTracker {
* Based on the delete result, the ScanQueryMatcher will decide the next
* operation
*/
enum DeleteResult {
public static enum DeleteResult {
FAMILY_DELETED, // The KeyValue is deleted by a delete family.
FAMILY_VERSION_DELETED, // The KeyValue is deleted by a delete family version.
COLUMN_DELETED, // The KeyValue is deleted by a delete column.

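A hedged sketch of the tracker contract during one row's scan: delete markers met in scan order are fed to add(), and later cells are checked with isDeleted(). The KeyValue offset/length accessors and the NOT_DELETED constant are assumptions based on the era's API.

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.DeleteTracker;
import org.apache.hadoop.hbase.regionserver.DeleteTracker.DeleteResult;

public class DeleteCheckSketch {
  /** Returns true if the cell survives the delete markers seen so far. */
  static boolean isVisible(DeleteTracker tracker, KeyValue kv) {
    if (kv.isDelete()) {
      tracker.add(kv.getBuffer(), kv.getQualifierOffset(), kv.getQualifierLength(),
          kv.getTimestamp(), kv.getType());
      return false; // delete markers themselves are not returned to clients
    }
    DeleteResult result = tracker.isDeleted(kv.getBuffer(), kv.getQualifierOffset(),
        kv.getQualifierLength(), kv.getTimestamp());
    return result == DeleteResult.NOT_DELETED;
  }
}
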
@@ -5602,7 +5602,7 @@ public class HRegion implements HeapSize { // , Writable{
* bulkLoadHFile() to perform any necessary
* pre/post processing of a given bulkload call
*/
public interface BulkLoadListener {
public static interface BulkLoadListener {

/**
* Called before an HFile is actually loaded

@@ -47,7 +47,7 @@ public interface InternalScanner extends Closeable {
* @return true if more rows exist after this one, false if scanner is done
* @throws IOException e
*/
boolean next(List<KeyValue> results) throws IOException;
public boolean next(List<KeyValue> results) throws IOException;

/**
* Grab the next row's worth of values with a limit on the number of values

@@ -57,11 +57,11 @@ public interface InternalScanner extends Closeable {
* @return true if more rows exist after this one, false if scanner is done
* @throws IOException e
*/
boolean next(List<KeyValue> result, int limit) throws IOException;
public boolean next(List<KeyValue> result, int limit) throws IOException;

/**
* Closes the scanner and releases any resources it has allocated
* @throws IOException
*/
void close() throws IOException;
public void close() throws IOException;
}

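A sketch of draining an InternalScanner row by row with next(List); the result list is reused across rows, matching the contract above:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.InternalScanner;

public class DrainScanner {
  static long countCells(InternalScanner scanner) throws IOException {
    List<KeyValue> row = new ArrayList<KeyValue>();
    long cells = 0;
    boolean more;
    do {
      more = scanner.next(row); // fills one row's worth of KeyValues
      cells += row.size();
      row.clear();              // reuse the buffer for the next row
    } while (more);
    scanner.close();
    return cells;
  }
}
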
@@ -34,20 +34,20 @@ public interface KeyValueScanner {
* Look at the next KeyValue in this scanner, but do not iterate scanner.
* @return the next KeyValue
*/
KeyValue peek();
public KeyValue peek();

/**
* Return the next KeyValue in this scanner, iterating the scanner
* @return the next KeyValue
*/
KeyValue next() throws IOException;
public KeyValue next() throws IOException;

/**
* Seek the scanner at or after the specified KeyValue.
* @param key seek value
* @return true if scanner has values left, false if end of scanner
*/
boolean seek(KeyValue key) throws IOException;
public boolean seek(KeyValue key) throws IOException;

/**
* Reseek the scanner at or after the specified KeyValue.

@@ -57,7 +57,7 @@ public interface KeyValueScanner {
* @param key seek value (should be non-null)
* @return true if scanner has values left, false if end of scanner
*/
boolean reseek(KeyValue key) throws IOException;
public boolean reseek(KeyValue key) throws IOException;

/**
* Get the sequence id associated with this KeyValueScanner. This is required

@@ -65,12 +65,12 @@ public interface KeyValueScanner {
* The default implementation for this would be to return 0. A file having
* lower sequence id will be considered to be the older one.
*/
long getSequenceID();
public long getSequenceID();

/**
* Close the KeyValue scanner.
*/
void close();
public void close();

/**
* Allows filtering out scanners (both StoreFile and memstore) that we don't

@@ -82,9 +82,8 @@ public interface KeyValueScanner {
* this query, based on TTL
* @return true if the scanner should be included in the query
*/
boolean shouldUseScanner(
Scan scan, SortedSet<byte[]> columns, long oldestUnexpiredTS
);
public boolean shouldUseScanner(Scan scan, SortedSet<byte[]> columns,
long oldestUnexpiredTS);

// "Lazy scanner" optimizations

@@ -98,7 +97,7 @@ public interface KeyValueScanner {
* @param forward do a forward-only "reseek" instead of a random-access seek
* @param useBloom whether to enable multi-column Bloom filter optimization
*/
boolean requestSeek(KeyValue kv, boolean forward, boolean useBloom)
public boolean requestSeek(KeyValue kv, boolean forward, boolean useBloom)
throws IOException;

/**

@@ -107,7 +106,7 @@ public interface KeyValueScanner {
* store scanner bubbles up to the top of the key-value heap. This method is
* then used to ensure the top store file scanner has done a seek operation.
*/
boolean realSeekDone();
public boolean realSeekDone();

/**
* Does the real seek operation in case it was skipped by

@@ -116,11 +115,11 @@ public interface KeyValueScanner {
* of the scanners). The easiest way to achieve this is to call
* {@link #realSeekDone()} first.
*/
void enforceSeek() throws IOException;
public void enforceSeek() throws IOException;

/**
* @return true if this is a file scanner. Otherwise a memory scanner is
* assumed.
*/
boolean isFileScanner();
public boolean isFileScanner();
}

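A seek-then-iterate sketch against the KeyValueScanner contract; the start key and the process() consumer are illustrative:

import java.io.IOException;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;

public class KvScanSketch {
  static void scanFrom(KeyValueScanner scanner, KeyValue start) throws IOException {
    try {
      if (!scanner.seek(start)) {
        return; // nothing at or after the start key
      }
      // next() returns the current KeyValue and advances; null means exhausted
      for (KeyValue kv = scanner.next(); kv != null; kv = scanner.next()) {
        process(kv); // hypothetical consumer
      }
    } finally {
      scanner.close();
    }
  }

  private static void process(KeyValue kv) { /* ... */ }
}
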
@@ -29,5 +29,5 @@ public interface LastSequenceId {
* @param regionName Encoded region name
* @return Last flushed sequence Id for regionName or -1 if it can't be determined
*/
long getLastSequenceId(byte[] regionName);
public long getLastSequenceId(byte[] regionName);
}

@@ -32,5 +32,5 @@ import org.apache.hadoop.classification.InterfaceAudience;
@InterfaceAudience.Private
public interface LeaseListener {
/** When a lease expires, this method is called. */
void leaseExpired();
public void leaseExpired();
}

@@ -35,7 +35,7 @@ interface OnlineRegions extends Server {
* Add to online regions.
* @param r
*/
void addToOnlineRegions(final HRegion r);
public void addToOnlineRegions(final HRegion r);

/**
* This method removes HRegion corresponding to hri from the Map of onlineRegions.

@@ -44,7 +44,7 @@ interface OnlineRegions extends Server {
* @param destination Destination, if any, null otherwise.
* @return True if we removed a region from online list.
*/
boolean removeFromOnlineRegions(final HRegion r, ServerName destination);
public boolean removeFromOnlineRegions(final HRegion r, ServerName destination);

/**
* Return {@link HRegion} instance.

@@ -54,7 +54,7 @@ interface OnlineRegions extends Server {
* @return HRegion for the passed encoded <code>encodedRegionName</code> or
* null if named region is not a member of the online regions.
*/
HRegion getFromOnlineRegions(String encodedRegionName);
public HRegion getFromOnlineRegions(String encodedRegionName);

/**
* Get all online regions of a table in this RS.

@@ -62,5 +62,5 @@ interface OnlineRegions extends Server {
* @return List of HRegion
* @throws java.io.IOException
*/
List<HRegion> getOnlineRegions(byte[] tableName) throws IOException;
public List<HRegion> getOnlineRegions(byte[] tableName) throws IOException;
}

@@ -34,13 +34,13 @@ public interface RegionScanner extends InternalScanner {
/**
* @return The RegionInfo for this scanner.
*/
HRegionInfo getRegionInfo();
public HRegionInfo getRegionInfo();

/**
* @return True if a filter indicates that this scanner will return no further rows.
* @throws IOException in case of I/O failure on a filter.
*/
boolean isFilterDone() throws IOException;
public boolean isFilterDone() throws IOException;

/**
* Do a reseek to the required row. Should not be used to seek to a key which

@@ -52,17 +52,17 @@ public interface RegionScanner extends InternalScanner {
* if row is null
*
*/
boolean reseek(byte[] row) throws IOException;
public boolean reseek(byte[] row) throws IOException;

/**
* @return The preferred max buffersize. See {@link Scan#setMaxResultSize(long)}
*/
long getMaxResultSize();
public long getMaxResultSize();

/**
* @return The Scanner's MVCC readPt see {@link MultiVersionConsistencyControl}
*/
long getMvccReadPoint();
public long getMvccReadPoint();

/**
* Grab the next row's worth of values with the default limit on the number of values

@@ -74,7 +74,7 @@ public interface RegionScanner extends InternalScanner {
* @return true if more rows exist after this one, false if scanner is done
* @throws IOException e
*/
boolean nextRaw(List<KeyValue> result) throws IOException;
public boolean nextRaw(List<KeyValue> result) throws IOException;

/**
* Grab the next row's worth of values with a limit on the number of values

@@ -102,5 +102,5 @@ public interface RegionScanner extends InternalScanner {
* @return true if more rows exist after this one, false if scanner is done
* @throws IOException e
*/
boolean nextRaw(List<KeyValue> result, int limit) throws IOException;
public boolean nextRaw(List<KeyValue> result, int limit) throws IOException;
}

@@ -40,31 +40,31 @@ public interface RegionServerServices extends OnlineRegions, FavoredNodesForRegi
/**
* @return True if this regionserver is stopping.
*/
boolean isStopping();
public boolean isStopping();

/** @return the HLog for a particular region. Pass null for getting the
* default (common) WAL */
HLog getWAL(HRegionInfo regionInfo) throws IOException;
public HLog getWAL(HRegionInfo regionInfo) throws IOException;

/**
* @return Implementation of {@link CompactionRequestor} or null.
*/
CompactionRequestor getCompactionRequester();
public CompactionRequestor getCompactionRequester();

/**
* @return Implementation of {@link FlushRequester} or null.
*/
FlushRequester getFlushRequester();
public FlushRequester getFlushRequester();

/**
* @return the RegionServerAccounting for this Region Server
*/
RegionServerAccounting getRegionServerAccounting();
public RegionServerAccounting getRegionServerAccounting();

/**
* @return RegionServer's instance of {@link TableLockManager}
*/
TableLockManager getTableLockManager();
public TableLockManager getTableLockManager();

/**
* Tasks to perform after region open to complete deploy of region on

@@ -75,42 +75,42 @@ public interface RegionServerServices extends OnlineRegions, FavoredNodesForRegi
* @throws KeeperException
* @throws IOException
*/
void postOpenDeployTasks(final HRegion r, final CatalogTracker ct)
public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct)
throws KeeperException, IOException;

/**
* Returns a reference to the region server's RPC server
*/
RpcServerInterface getRpcServer();
public RpcServerInterface getRpcServer();

/**
* Get the regions that are currently being opened or closed in the RS
* @return map of regions in transition in this RS
*/
ConcurrentMap<byte[], Boolean> getRegionsInTransitionInRS();
public ConcurrentMap<byte[], Boolean> getRegionsInTransitionInRS();

/**
* @return Return the FileSystem object used by the regionserver
*/
FileSystem getFileSystem();
public FileSystem getFileSystem();

/**
* @return The RegionServer's "Leases" service
*/
Leases getLeases();
public Leases getLeases();

/**
* @return hbase executor service
*/
ExecutorService getExecutorService();
public ExecutorService getExecutorService();

/**
* @return The RegionServer's CatalogTracker
*/
CatalogTracker getCatalogTracker();
public CatalogTracker getCatalogTracker();

/**
* @return set of recovering regions on the hosting region server
*/
Map<String, HRegion> getRecoveringRegions();
public Map<String, HRegion> getRecoveringRegions();
}

@@ -38,18 +38,17 @@ public interface ReplicationService {
* Initializes the replication service object.
* @throws IOException
*/
void initialize(
Server rs, FileSystem fs, Path logdir, Path oldLogDir
) throws IOException;
public void initialize(Server rs, FileSystem fs, Path logdir,
Path oldLogDir) throws IOException;

/**
* Start replication services.
* @throws IOException
*/
void startReplicationService() throws IOException;
public void startReplicationService() throws IOException;

/**
* Stops replication service.
*/
void stopReplicationService();
public void stopReplicationService();
}

@@ -38,5 +38,5 @@ public interface ReplicationSinkService extends ReplicationService {
* @param cells Cells that the WALEntries refer to (if cells is non-null)
* @throws IOException
*/
void replicateLogEntries(List<WALEntry> entries, CellScanner cells) throws IOException;
public void replicateLogEntries(List<WALEntry> entries, CellScanner cells) throws IOException;
}

@@ -32,5 +32,5 @@ public interface ReplicationSourceService extends ReplicationService {
* Returns a WALObserver for the service. This is needed to
* observe log rolls and log archival events.
*/
WALActionsListener getWALActionsListener();
public WALActionsListener getWALActionsListener();
}

@@ -640,13 +640,13 @@ public class SplitLogWorker extends ZooKeeperListener implements Runnable {
* is better to have workers prepare the task and then have the
* {@link SplitLogManager} commit the work in SplitLogManager.TaskFinisher
*/
public interface TaskExecutor {
enum Status {
static public interface TaskExecutor {
static public enum Status {
DONE(),
ERR(),
RESIGNED(),
PREEMPTED()
}
Status exec(String name, CancelableProgressable p);
public Status exec(String name, CancelableProgressable p);
}
}

@@ -50,13 +50,14 @@ public interface Store extends HeapSize, StoreConfigInformation {

/* The default priority for user-specified compaction requests.
* The user gets top priority unless we have blocking compactions. (Pri <= 0)
*/ int PRIORITY_USER = 1;
int NO_PRIORITY = Integer.MIN_VALUE;
*/
public static final int PRIORITY_USER = 1;
public static final int NO_PRIORITY = Integer.MIN_VALUE;

// General Accessors
KeyValue.KVComparator getComparator();
public KeyValue.KVComparator getComparator();

Collection<StoreFile> getStorefiles();
public Collection<StoreFile> getStorefiles();

/**
* Close all the readers. We don't need to worry about subsequent requests because the HRegion

@@ -64,7 +65,7 @@ public interface Store extends HeapSize, StoreConfigInformation {
* @return the {@link StoreFile StoreFiles} that were previously being used.
* @throws IOException on failure
*/
Collection<StoreFile> close() throws IOException;
public Collection<StoreFile> close() throws IOException;

/**
* Return a scanner for both the memstore and the HStore files. Assumes we are not in a

@@ -74,7 +75,7 @@ public interface Store extends HeapSize, StoreConfigInformation {
* @return a scanner over the current key values
* @throws IOException on failure
*/
KeyValueScanner getScanner(Scan scan, final NavigableSet<byte[]> targetCols)
public KeyValueScanner getScanner(Scan scan, final NavigableSet<byte[]> targetCols)
throws IOException;

/**

@@ -88,16 +89,11 @@ public interface Store extends HeapSize, StoreConfigInformation {
* @param stopRow
* @return all scanners for this store
*/
List<KeyValueScanner> getScanners(
boolean cacheBlocks,
boolean isGet,
boolean isCompaction,
ScanQueryMatcher matcher,
byte[] startRow,
byte[] stopRow
) throws IOException;
public List<KeyValueScanner> getScanners(boolean cacheBlocks,
boolean isGet, boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow,
byte[] stopRow) throws IOException;

ScanInfo getScanInfo();
public ScanInfo getScanInfo();

/**
* Adds or replaces the specified KeyValues.

@@ -112,14 +108,14 @@ public interface Store extends HeapSize, StoreConfigInformation {
* @return memstore size delta
* @throws IOException
*/
long upsert(Iterable<? extends Cell> cells, long readpoint) throws IOException;
public long upsert(Iterable<? extends Cell> cells, long readpoint) throws IOException;

/**
* Adds a value to the memstore
* @param kv
* @return memstore size delta
*/
long add(KeyValue kv);
public long add(KeyValue kv);

/**
* When was the last edit done in the memstore

@@ -131,7 +127,7 @@ public interface Store extends HeapSize, StoreConfigInformation {
* key & memstoreTS value of the kv parameter.
* @param kv
*/
void rollback(final KeyValue kv);
public void rollback(final KeyValue kv);

/**
* Find the key that matches <i>row</i> exactly, or the one that immediately precedes it. WARNING:

@@ -145,9 +141,9 @@ public interface Store extends HeapSize, StoreConfigInformation {
* @return Found keyvalue or null if none found.
* @throws IOException
*/
KeyValue getRowKeyAtOrBefore(final byte[] row) throws IOException;
public KeyValue getRowKeyAtOrBefore(final byte[] row) throws IOException;

FileSystem getFileSystem();
public FileSystem getFileSystem();

/*
* @param maxKeyCount

@@ -156,48 +152,44 @@ public interface Store extends HeapSize, StoreConfigInformation {
* @param includeMVCCReadpoint whether we should write out the MVCC readpoint
* @return Writer for a new StoreFile in the tmp dir.
*/
StoreFile.Writer createWriterInTmp(
long maxKeyCount,
Compression.Algorithm compression,
boolean isCompaction,
boolean includeMVCCReadpoint
) throws IOException;
public StoreFile.Writer createWriterInTmp(long maxKeyCount, Compression.Algorithm compression,
boolean isCompaction, boolean includeMVCCReadpoint) throws IOException;

// Compaction oriented methods

boolean throttleCompaction(long compactionSize);
public boolean throttleCompaction(long compactionSize);

/**
* getter for CompactionProgress object
* @return CompactionProgress object; can be null
*/
CompactionProgress getCompactionProgress();
public CompactionProgress getCompactionProgress();

CompactionContext requestCompaction() throws IOException;
public CompactionContext requestCompaction() throws IOException;

CompactionContext requestCompaction(int priority, CompactionRequest baseRequest)
public CompactionContext requestCompaction(int priority, CompactionRequest baseRequest)
throws IOException;

void cancelRequestedCompaction(CompactionContext compaction);
public void cancelRequestedCompaction(CompactionContext compaction);

List<StoreFile> compact(CompactionContext compaction) throws IOException;
public List<StoreFile> compact(CompactionContext compaction) throws IOException;

/**
* @return true if we should run a major compaction.
*/
boolean isMajorCompaction() throws IOException;
public boolean isMajorCompaction() throws IOException;

void triggerMajorCompaction();
public void triggerMajorCompaction();

/**
* See if there are too many store files in this store
* @return true if number of store files is greater than the number defined in minFilesToCompact
*/
boolean needsCompaction();
public boolean needsCompaction();

int getCompactPriority();
public int getCompactPriority();

StoreFlushContext createFlushContext(long cacheFlushId);
public StoreFlushContext createFlushContext(long cacheFlushId);

/**
* Call to complete a compaction. It's for the case where we find in the WAL a compaction

@@ -205,18 +197,18 @@ public interface Store extends HeapSize, StoreConfigInformation {
* See HBASE-2331.
* @param compaction
*/
void completeCompactionMarker(CompactionDescriptor compaction)
public void completeCompactionMarker(CompactionDescriptor compaction)
throws IOException;

// Split oriented methods

boolean canSplit();
public boolean canSplit();

/**
* Determines if Store should be split
* @return byte[] if store should be split, null otherwise.
*/
byte[] getSplitPoint();
public byte[] getSplitPoint();

// Bulk Load methods

@@ -224,7 +216,7 @@ public interface Store extends HeapSize, StoreConfigInformation {
* This throws a WrongRegionException if the HFile does not fit in this region, or an
* InvalidHFileException if the HFile is not valid.
*/
void assertBulkLoadHFileOk(Path srcPath) throws IOException;
public void assertBulkLoadHFileOk(Path srcPath) throws IOException;

/**
* This method should only be called from HRegion. It is assumed that the ranges of values in the

@@ -233,7 +225,7 @@ public interface Store extends HeapSize, StoreConfigInformation {
* @param srcPathStr
* @param sequenceId sequence Id associated with the HFile
*/
void bulkLoadHFile(String srcPathStr, long sequenceId) throws IOException;
public void bulkLoadHFile(String srcPathStr, long sequenceId) throws IOException;

// General accessors into the state of the store
// TODO abstract some of this out into a metrics class

@@ -241,50 +233,50 @@ public interface Store extends HeapSize, StoreConfigInformation {
/**
* @return <tt>true</tt> if the store has any underlying reference files to older HFiles
*/
boolean hasReferences();
public boolean hasReferences();

/**
* @return The size of this store's memstore, in bytes
*/
long getMemStoreSize();
public long getMemStoreSize();

HColumnDescriptor getFamily();
public HColumnDescriptor getFamily();

/**
* @return The maximum memstoreTS in all store files.
*/
long getMaxMemstoreTS();
public long getMaxMemstoreTS();

/**
* @return the data block encoder
*/
HFileDataBlockEncoder getDataBlockEncoder();
public HFileDataBlockEncoder getDataBlockEncoder();

/** @return aggregate size of all HStores used in the last compaction */
long getLastCompactSize();
public long getLastCompactSize();

/** @return aggregate size of HStore */
long getSize();
public long getSize();

/**
* @return Count of store files
*/
int getStorefilesCount();
public int getStorefilesCount();

/**
* @return The size of the store files, in bytes, uncompressed.
*/
long getStoreSizeUncompressed();
public long getStoreSizeUncompressed();

/**
* @return The size of the store files, in bytes.
*/
long getStorefilesSize();
public long getStorefilesSize();

/**
* @return The size of the store file indexes, in bytes.
*/
long getStorefilesIndexSize();
public long getStorefilesIndexSize();

/**
* Returns the total size of all index blocks in the data block indexes, including the root level,

@@ -292,14 +284,14 @@ public interface Store extends HeapSize, StoreConfigInformation {
* single-level indexes.
* @return the total size of block indexes in the store
*/
long getTotalStaticIndexSize();
public long getTotalStaticIndexSize();

/**
* Returns the total byte size of all Bloom filter bit arrays. For compound Bloom filters even the
* Bloom blocks currently not loaded into the block cache are counted.
* @return the total size of all Bloom filters in the store
*/
long getTotalStaticBloomSize();
public long getTotalStaticBloomSize();

// Test-helper methods

@@ -307,40 +299,40 @@ public interface Store extends HeapSize, StoreConfigInformation {
* Used for tests.
* @return cache configuration for this Store.
*/
CacheConfig getCacheConfig();
public CacheConfig getCacheConfig();

/**
* @return the parent region info hosting this store
*/
HRegionInfo getRegionInfo();
public HRegionInfo getRegionInfo();

RegionCoprocessorHost getCoprocessorHost();
public RegionCoprocessorHost getCoprocessorHost();

boolean areWritesEnabled();
public boolean areWritesEnabled();

/**
* @return The smallest mvcc readPoint across all the scanners in this
* region. Writes older than this readPoint are included in every
* read operation.
*/
long getSmallestReadPoint();
public long getSmallestReadPoint();

String getColumnFamilyName();
public String getColumnFamilyName();

String getTableName();
public String getTableName();

/*
* @param o Observer who wants to know about changes in set of Readers
*/
void addChangedReaderObserver(ChangedReadersObserver o);
public void addChangedReaderObserver(ChangedReadersObserver o);

/*
* @param o Observer no longer interested in changes in set of Readers.
*/
void deleteChangedReaderObserver(ChangedReadersObserver o);
public void deleteChangedReaderObserver(ChangedReadersObserver o);

/**
* @return Whether this store has too many store files.
*/
boolean hasTooManyStoreFiles();
public boolean hasTooManyStoreFiles();
}