HBASE-12985 Javadoc warning and findbugs fixes to get us green again

stack committed 2015-02-07 17:05:11 -08:00
parent ac175b1bd9
commit eea9873cef
9 changed files with 15 additions and 25 deletions

HttpDoAsClient.java

@@ -248,12 +248,6 @@ public class HttpDoAsClient {
       System.out.println("row: " + utf8(rowResult.row.array()) + ", cols: " + rowStr);
     }
 
-  private void printRow(List<TRowResult> rows) {
-    for (TRowResult rowResult : rows) {
-      printRow(rowResult);
-    }
-  }
-
   static Subject getSubject() throws Exception {
     if (!secure) return new Subject();
     /*

SplitLogCounters.java

@@ -86,7 +86,7 @@ public class SplitLogCounters {
   public final static AtomicLong tot_wkr_task_grabing = new AtomicLong(0);
 
   public static void resetCounters() throws Exception {
-    Class<?> cl = (new SplitLogCounters()).getClass();
+    Class<?> cl = SplitLogCounters.class;
     for (Field fld : cl.getDeclaredFields()) {
       if (!fld.isSynthetic()) ((AtomicLong)fld.get(null)).set(0);
     }
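The change above swaps a reflective (new SplitLogCounters()).getClass() for the class literal SplitLogCounters.class, which names the same Class object without constructing a throwaway instance. A minimal sketch of the same reset-by-reflection pattern, with a hypothetical Counters class standing in for SplitLogCounters:

import java.lang.reflect.Field;
import java.util.concurrent.atomic.AtomicLong;

// Sketch only: Counters is a stand-in, not an HBase class.
public class Counters {
  public final static AtomicLong tot_mgr_log_split = new AtomicLong(0);
  public final static AtomicLong tot_wkr_task_grabing = new AtomicLong(0);

  public static void resetCounters() throws Exception {
    Class<?> cl = Counters.class; // class literal: no throwaway instance
    for (Field fld : cl.getDeclaredFields()) {
      // Skip compiler-generated fields; every declared field here is a static AtomicLong.
      if (!fld.isSynthetic()) ((AtomicLong) fld.get(null)).set(0);
    }
  }
}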

ClientSideRegionScanner.java

@@ -43,16 +43,13 @@ import org.mortbay.log.Log;
 public class ClientSideRegionScanner extends AbstractClientScanner {
 
   private HRegion region;
-  private Scan scan;
   RegionScanner scanner;
   List<Cell> values;
 
   public ClientSideRegionScanner(Configuration conf, FileSystem fs,
       Path rootDir, HTableDescriptor htd, HRegionInfo hri, Scan scan, ScanMetrics scanMetrics)
       throws IOException {
-    this.scan = scan;
-
     // region is immutable, set isolation level
     scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
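The two deletions above drop a field that was assigned in the constructor but never read again (the scan parameter is used directly), the FindBugs URF_UNREAD_FIELD pattern. A contrived before/after sketch, not HBase code:

class Before {
  private int batch;           // assigned but never read: URF_UNREAD_FIELD
  Before(int batch) {
    this.batch = batch;
    System.out.println(batch); // only the parameter is actually used
  }
}

class After {
  After(int batch) {
    System.out.println(batch); // keep the parameter, drop the dead field
  }
}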

RpcServer.java

@@ -1874,8 +1874,9 @@ public class RpcServer implements RpcServerInterface {
    * instance else pass null for no authentication check.
    * @param name Used keying this rpc servers' metrics and for naming the Listener thread.
    * @param services A list of services.
-   * @param bindAddres Where to listen
-   * @throws IOException
+   * @param bindAddress Where to listen
+   * @param conf
+   * @param scheduler
    */
   public RpcServer(final Server server, final String name,
       final List<BlockingServiceAndInterface> services,
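Javadoc warns when an @param tag names something that is not in the signature, which is what the misspelled bindAddres did; the fix also documents conf and scheduler and drops a bare @throws tag that had no description. An illustrative (non-HBase) example of tags that line up with the declaration:

import java.io.IOException;
import java.net.InetSocketAddress;

public class DocExample {
  /**
   * @param bindAddress where to listen; must match the parameter name exactly
   * @param name used for naming the listener thread
   * @throws IOException if the address cannot be bound
   */
  public DocExample(InetSocketAddress bindAddress, String name) throws IOException {
    if (bindAddress == null || name == null) throw new IOException("bad arguments");
  }
}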

BaseLoadBalancer.java

@@ -61,8 +61,8 @@ import com.google.common.collect.Sets;
 /**
  * The base class for load balancers. It provides the the functions used to by
  * {@link org.apache.hadoop.hbase.master.AssignmentManager} to assign regions
  * in the edge cases. It doesn't provide an implementation of the
  * actual balancing algorithm.
  *
  */
@@ -138,8 +138,6 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
     int numRegions;
     int numMovedRegions = 0; //num moved regions from the initial configuration
-    // num of moved regions away from master that should be on the master
-    int numMovedMetaRegions = 0; //num of moved regions that are META
 
     Map<ServerName, List<HRegionInfo>> clusterState;
     protected final RackManager rackManager;

FlushTableSubprocedure.java

@@ -53,7 +53,7 @@ public class FlushTableSubprocedure extends Subprocedure {
     this.taskManager = taskManager;
   }
 
-  private class RegionFlushTask implements Callable<Void> {
+  private static class RegionFlushTask implements Callable<Void> {
     HRegion region;
     RegionFlushTask(HRegion region) {
       this.region = region;
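Making RegionFlushTask static addresses the FindBugs SIC_INNER_SHOULD_BE_STATIC pattern: a non-static inner class carries a hidden reference to its enclosing instance, which this task never uses. The same fix is applied to PeriodicMemstoreFlusher and EnsureKvEncoder below. A minimal sketch of the difference:

import java.util.concurrent.Callable;

public class Outer {
  // Inner class: every instance pins Outer.this, even if it is never used (SIC warning).
  class InnerTask implements Callable<Void> {
    public Void call() { return null; }
  }

  // Static nested class: no hidden reference; any state it needs is passed explicitly.
  static class NestedTask implements Callable<Void> {
    private final String name;
    NestedTask(String name) { this.name = name; }
    public Void call() { System.out.println(name); return null; }
  }
}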

HRegionServer.java

@@ -1475,7 +1475,7 @@ public class HRegionServer extends HasThread implements
       }
     }
 
-  class PeriodicMemstoreFlusher extends ScheduledChore {
+  static class PeriodicMemstoreFlusher extends ScheduledChore {
     final HRegionServer server;
     final static int RANGE_OF_DELAY = 20000; //millisec
     final static int MIN_DELAY_TIME = 3000; //millisec

WALCellCodec.java

@@ -82,7 +82,7 @@ public class WALCellCodec implements Codec {
   static String getWALCellCodecClass(Configuration conf) {
     return conf.get(WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
   }
 
   /**
    * Create and setup a {@link WALCellCodec} from the {@code cellCodecClsName} and
    * CompressionContext, if {@code cellCodecClsName} is specified.
@@ -106,7 +106,7 @@ public class WALCellCodec implements Codec {
   }
 
   /**
    * Create and setup a {@link WALCellCodec} from the
    * CompressionContext.
    * Cell Codec classname is read from {@link Configuration}.
    * Fully prepares the codec for use.
@@ -122,7 +122,7 @@ public class WALCellCodec implements Codec {
     return ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName, new Class[]
         { Configuration.class, CompressionContext.class }, new Object[] { conf, compression });
   }
 
   public interface ByteStringCompressor {
     ByteString compress(byte[] data, Dictionary dict) throws IOException;
   }
@@ -249,7 +249,7 @@ public class WALCellCodec implements Codec {
     protected Cell parseCell() throws IOException {
       int keylength = StreamUtils.readRawVarint32(in);
       int vlength = StreamUtils.readRawVarint32(in);
       int tagsLength = StreamUtils.readRawVarint32(in);
       int length = 0;
       if(tagsLength == 0) {
@@ -328,7 +328,7 @@ public class WALCellCodec implements Codec {
     }
   }
 
-  public class EnsureKvEncoder extends BaseEncoder {
+  public static class EnsureKvEncoder extends BaseEncoder {
     public EnsureKvEncoder(OutputStream out) {
       super(out);
     }

ZKSecretWatcher.java

@@ -85,7 +85,7 @@ public class ZKSecretWatcher extends ZooKeeperListener {
     if (keysParentZNode.equals(ZKUtil.getParent(path))) {
       String keyId = ZKUtil.getNodeName(path);
       try {
-        Integer id = new Integer(keyId);
+        Integer id = Integer.valueOf(keyId);
         secretManager.removeKey(id);
       } catch (NumberFormatException nfe) {
         LOG.error("Invalid znode name for key ID '"+keyId+"'", nfe);
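new Integer(...) always allocates, while Integer.valueOf(...) may return a cached box (the cache is guaranteed for values in -128..127); FindBugs flags the constructor as DM_NUMBER_CTOR, and it was later deprecated in Java 9. A small self-contained demonstration:

public class BoxDemo {
  public static void main(String[] args) {
    Integer a = Integer.valueOf("42"); // parses, then boxes via the shared cache
    Integer b = new Integer("42");     // always a fresh allocation
    System.out.println(a == Integer.valueOf(42)); // true: cached instance reused
    System.out.println(b == Integer.valueOf(42)); // false: b is a distinct object
  }
}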