HBASE-876 There are a large number of Java warnings in HBase

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@735945 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack committed on 2009-01-20 06:18:19 +00:00
parent 1f8c2aeeaa
commit 4b49a9c162
16 changed files with 72 additions and 75 deletions

View File

@ -12,6 +12,8 @@ Release 0.20.0 - Unreleased
(Samuel Guo via Stack)
HBASE-1130 PrefixRowFilter (Michael Gottesman via Stack)
HBASE-1139 Update Clover in build.xml
HBASE-876 There are a large number of Java warnings in HBase; part 1
(Evgeny Ryabitskiy via Stack)
Release 0.19.0 - Unreleased
INCOMPATIBLE CHANGES

View File

@ -22,8 +22,10 @@ package org.apache.hadoop.hbase;
/**
* Thrown if issue with passed column name.
*/
@SuppressWarnings("serial")
public class ColumnNameParseException extends DoNotRetryIOException {
private static final long serialVersionUID = -2897373353949942302L;
/** default constructor */
public ColumnNameParseException() {
super();
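
This hunk, like the ones for DoNotRetryIOException, DroppedSnapshotException, LeaseException and ValueOverMaxLengthException further down, pairs the @SuppressWarnings("serial") annotation with an explicit serialVersionUID field; declaring the ID is what satisfies the compiler's serial-version lint without suppressing it. A minimal standalone sketch of that shape, with an invented class name and ID value (not taken from the HBase tree):

import java.io.IOException;

/**
 * Illustrative only: a serializable exception that declares its own
 * serialVersionUID rather than relying on @SuppressWarnings("serial").
 */
public class ExampleParseException extends IOException {
  // A fixed ID keeps serialized forms stable across recompiles and
  // silences the "serializable class does not declare serialVersionUID" warning.
  private static final long serialVersionUID = 1L;

  /** Default constructor. */
  public ExampleParseException() {
    super();
  }

  /** @param message description of the parse failure */
  public ExampleParseException(String message) {
    super(message);
  }
}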

View File

@ -25,8 +25,10 @@ import java.io.IOException;
* Subclass if exception is not meant to be retried: e.g.
* {@link UnknownScannerException}
*/
@SuppressWarnings("serial")
public class DoNotRetryIOException extends IOException {
private static final long serialVersionUID = 1197446454511704139L;
/**
* default constructor
*/

View File

@ -21,8 +21,10 @@ import java.io.IOException;
* Thrown during flush if the possibility snapshot content was not properly
* persisted into store files. Response should include replay of hlog content.
*/
@SuppressWarnings("serial")
public class DroppedSnapshotException extends IOException {
private static final long serialVersionUID = -5463156580831677374L;
/**
* @param msg
*/

View File

@ -40,7 +40,7 @@ import org.apache.hadoop.io.WritableComparable;
* column and recreating it. If there is data stored in the column, it will be
* deleted when the column is deleted.
*/
public class HColumnDescriptor implements WritableComparable {
public class HColumnDescriptor implements WritableComparable<HColumnDescriptor> {
// For future backward compatibility
// Version 3 was when column names become byte arrays and when we picked up
@ -489,7 +489,7 @@ public class HColumnDescriptor implements WritableComparable {
@Override
public boolean equals(Object obj) {
return compareTo(obj) == 0;
return compareTo((HColumnDescriptor)obj) == 0;
}
@Override
@ -502,7 +502,6 @@ public class HColumnDescriptor implements WritableComparable {
// Writable
@SuppressWarnings("deprecation")
public void readFields(DataInput in) throws IOException {
int version = in.readByte();
if (version < 6) {
@ -566,12 +565,11 @@ public class HColumnDescriptor implements WritableComparable {
// Comparable
public int compareTo(Object o) {
HColumnDescriptor other = (HColumnDescriptor)o;
int result = Bytes.compareTo(this.name, other.getName());
public int compareTo(HColumnDescriptor o) {
int result = Bytes.compareTo(this.name, o.getName());
if (result == 0) {
// punt on comparison for ordering, just calculate difference
result = this.values.hashCode() - other.values.hashCode();
result = this.values.hashCode() - o.values.hashCode();
if (result < 0)
result = -1;
else if (result > 0)
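
The HColumnDescriptor change above is the same pattern applied below to HRegionInfo, HRegionLocation, HServerAddress, HServerInfo and HServerLoad: the raw Comparable/WritableComparable interface is parameterized with the implementing class, compareTo takes the typed argument, and equals(Object) casts before delegating. A minimal sketch of that shape using plain java.lang.Comparable so it stands alone without the Hadoop Writable interfaces; the class and field are invented for illustration:

import java.util.Objects;

/** Illustrative only: typed Comparable with equals delegating to compareTo. */
public class ExampleDescriptor implements Comparable<ExampleDescriptor> {
  private final String name;

  public ExampleDescriptor(String name) {
    this.name = name;
  }

  // Typed compareTo: no cast inside the method, no raw-type warning at call sites.
  @Override
  public int compareTo(ExampleDescriptor o) {
    return this.name.compareTo(o.name);
  }

  @Override
  public boolean equals(Object obj) {
    // Mirrors the style in the diff: cast, then delegate to compareTo. An unrelated
    // argument type now fails with ClassCastException instead of returning false;
    // an instanceof check would be the defensive variant.
    return compareTo((ExampleDescriptor) obj) == 0;
  }

  @Override
  public int hashCode() {
    return Objects.hashCode(name);
  }
}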

View File

@ -245,16 +245,14 @@ public interface HConstants {
// there.
public static final String HBASE_CLIENT_RETRIES_NUMBER_KEY =
"hbase.client.retries.number";
//TODO: DEFAULT_CLIENT_RETRIES is not referenced anywhere. Remove it.
public static final int DEFAULT_CLIENT_RETRIES = 5;
//TODO: although the following are referenced widely to format strings for
// the shell. They really aren't a part of the public API. It would be
// nice if we could put them somewhere where they did not need to be
// public. They could have package visibility
public static final String NAME = "NAME";
public static final String VERSIONS = "VERSIONS";
public static final String IN_MEMORY = "IN_MEMORY";
static final String NAME = "NAME";
static final String VERSIONS = "VERSIONS";
static final String IN_MEMORY = "IN_MEMORY";
/**
* This is a retry backoff multiplier table similar to the BSD TCP syn
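
One note on the hunk above: HConstants is an interface, so NAME, VERSIONS and IN_MEMORY remain implicitly public static final even after the explicit public modifier is dropped; the edit removes a redundant modifier rather than giving the constants the package visibility the TODO comment wishes for. A short illustration with an invented interface name:

/** Illustrative only: fields of an interface are implicitly public static final. */
interface ExampleConstants {
  public static final String EXPLICIT = "EXPLICIT"; // redundant modifiers on an interface field
  static final String IMPLICIT = "IMPLICIT";        // same visibility, no redundant modifier
}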

View File

@ -34,7 +34,7 @@ import org.apache.hadoop.io.WritableComparable;
* Contains HRegion id, start and end keys, a reference to this
* HRegions' table descriptor, etc.
*/
public class HRegionInfo extends VersionedWritable implements WritableComparable {
public class HRegionInfo extends VersionedWritable implements WritableComparable<HRegionInfo>{
private final byte VERSION = 0;
/**
@ -325,7 +325,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
@Override
public boolean equals(Object o) {
return this.compareTo(o) == 0;
return this.compareTo((HRegionInfo)o) == 0;
}
@Override
@ -374,26 +374,25 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
// Comparable
//
public int compareTo(Object o) {
HRegionInfo other = (HRegionInfo) o;
if (other == null) {
public int compareTo(HRegionInfo o) {
if (o == null) {
return 1;
}
// Are regions of same table?
int result = this.tableDesc.compareTo(other.tableDesc);
int result = this.tableDesc.compareTo(o.tableDesc);
if (result != 0) {
return result;
}
// Compare start keys.
result = HStoreKey.compareTwoRowKeys(other, this.startKey, other.startKey);
result = HStoreKey.compareTwoRowKeys(o, this.startKey, o.startKey);
if (result != 0) {
return result;
}
// Compare end keys.
return HStoreKey.compareTwoRowKeys(other, this.endKey, other.endKey);
return HStoreKey.compareTwoRowKeys(o, this.endKey, o.endKey);
}
/**

View File

@ -23,8 +23,7 @@ package org.apache.hadoop.hbase;
* Contains the HRegionInfo for the region and the HServerAddress for the
* HRegionServer serving the region
*/
@SuppressWarnings("unchecked")
public class HRegionLocation implements Comparable {
public class HRegionLocation implements Comparable<HRegionLocation> {
private HRegionInfo regionInfo;
private HServerAddress serverAddress;
@ -47,7 +46,7 @@ public class HRegionLocation implements Comparable {
@Override
public boolean equals(Object o) {
return this.compareTo(o) == 0;
return this.compareTo((HRegionLocation)o) == 0;
}
@Override
@ -71,11 +70,10 @@ public class HRegionLocation implements Comparable {
// Comparable
//
public int compareTo(Object o) {
HRegionLocation other = (HRegionLocation) o;
int result = this.regionInfo.compareTo(other.regionInfo);
public int compareTo(HRegionLocation o) {
int result = this.regionInfo.compareTo(o.regionInfo);
if(result == 0) {
result = this.serverAddress.compareTo(other.serverAddress);
result = this.serverAddress.compareTo(o.serverAddress);
}
return result;
}

View File

@ -30,8 +30,7 @@ import java.net.InetSocketAddress;
* HServerAddress is a "label" for a HBase server that combines the host
* name and port number.
*/
@SuppressWarnings("unchecked")
public class HServerAddress implements WritableComparable {
public class HServerAddress implements WritableComparable<HServerAddress> {
private InetSocketAddress address;
String stringValue;
@ -117,7 +116,7 @@ public class HServerAddress implements WritableComparable {
@Override
public boolean equals(Object o) {
return this.compareTo(o) == 0;
return this.compareTo((HServerAddress)o) == 0;
}
@Override
@ -160,12 +159,11 @@ public class HServerAddress implements WritableComparable {
// Comparable
//
public int compareTo(Object o) {
HServerAddress that = (HServerAddress)o;
public int compareTo(HServerAddress o) {
// Addresses as Strings may not compare though address is for the one
// server with only difference being that one address has hostname
// resolved whereas other only has IP.
if (this.address.equals(that.address)) return 0;
return this.toString().compareTo(that.toString());
if (this.address.equals(o.address)) return 0;
return this.toString().compareTo(o.toString());
}
}

View File

@ -33,7 +33,7 @@ import org.apache.hadoop.io.WritableComparable;
* In the future it will contain information about the source machine and
* load statistics.
*/
public class HServerInfo implements WritableComparable {
public class HServerInfo implements WritableComparable<HServerInfo> {
private HServerAddress serverAddress;
private long startCode;
private HServerLoad load;
@ -123,7 +123,7 @@ public class HServerInfo implements WritableComparable {
@Override
public boolean equals(Object obj) {
return compareTo(obj) == 0;
return compareTo((HServerInfo)obj) == 0;
}
@Override
@ -151,19 +151,18 @@ public class HServerInfo implements WritableComparable {
out.writeInt(this.infoPort);
}
public int compareTo(Object o) {
HServerInfo that = (HServerInfo)o;
int result = getServerAddress().compareTo(that.getServerAddress());
public int compareTo(HServerInfo o) {
int result = getServerAddress().compareTo(o.getServerAddress());
if (result != 0) {
return result;
}
if (this.infoPort != that.infoPort) {
return this.infoPort - that.infoPort;
if (this.infoPort != o.infoPort) {
return this.infoPort - o.infoPort;
}
if (getStartCode() == that.getStartCode()) {
if (getStartCode() == o.getStartCode()) {
return 0;
}
// Startcodes are timestamps.
return (int)(getStartCode() - that.getStartCode());
return (int)(getStartCode() - o.getStartCode());
}
}

View File

@ -31,8 +31,7 @@ import org.apache.hadoop.io.WritableComparable;
/**
* This class encapsulates metrics for determining the load on a HRegionServer
*/
@SuppressWarnings("unchecked")
public class HServerLoad implements WritableComparable {
public class HServerLoad implements WritableComparable<HServerLoad> {
/** number of regions */
// could just use regionLoad.size() but master.RegionManager likes to play
// around with this value while passing HServerLoad objects around during
@ -279,7 +278,7 @@ public class HServerLoad implements WritableComparable {
@Override
public boolean equals(Object o) {
return compareTo(o) == 0;
return compareTo((HServerLoad)o) == 0;
}
@Override
@ -311,7 +310,7 @@ public class HServerLoad implements WritableComparable {
public int getStorefiles() {
int count = 0;
for (RegionLoad info: regionLoad)
count += info.storefiles;
count += info.getStorefiles();
return count;
}
@ -321,7 +320,7 @@ public class HServerLoad implements WritableComparable {
public int getMemcacheSizeInMB() {
int count = 0;
for (RegionLoad info: regionLoad)
count += info.memcacheSizeMB;
count += info.getMemcacheSizeMB();
return count;
}
@ -331,7 +330,7 @@ public class HServerLoad implements WritableComparable {
public int getStorefileIndexSizeInMB() {
int count = 0;
for (RegionLoad info: regionLoad)
count += info.storefileIndexSizeMB;
count += info.getStorefileIndexSizeMB();
return count;
}
@ -414,8 +413,7 @@ public class HServerLoad implements WritableComparable {
// Comparable
public int compareTo(Object o) {
HServerLoad other = (HServerLoad) o;
return this.getLoad() - other.getLoad();
public int compareTo(HServerLoad o) {
return this.getLoad() - o.getLoad();
}
}

View File

@ -356,7 +356,7 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
// Comparable
public int compareTo(final HStoreKey o) {
return compareTo(this.regionInfo, this, (HStoreKey)o);
return compareTo(this.regionInfo, this, o);
}
static int compareTo(final HRegionInfo hri, final HStoreKey left,
@ -548,10 +548,8 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
*/
public static boolean equalsTwoRowKeys(HRegionInfo regionInfo,
byte[] rowA, byte[] rowB) {
return rowA == null && rowB == null? true:
rowA == null && rowB != null? false:
rowA != null && rowB == null? false:
rowA.length != rowB.length? false:
return ((rowA == null) && (rowB == null)) ? true:
(rowA == null) || (rowB == null) || (rowA.length != rowB.length) ? false:
compareTwoRowKeys(regionInfo,rowA,rowB) == 0;
}
@ -609,6 +607,7 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
this.hri = hri;
}
@SuppressWarnings("unchecked")
@Override
public int compare(final WritableComparable left,
final WritableComparable right) {
@ -644,7 +643,7 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
}
@Override
public boolean equals(@SuppressWarnings("unused") Object obj) {
public boolean equals(Object obj) {
return false;
}
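
The equalsTwoRowKeys rewrite in this file collapses a chain of nested ternaries into one both-null case plus one short-circuited expression. A small standalone sketch of the same simplification; the class and method names are invented, and Arrays.equals stands in for the real helper, which delegates to compareTwoRowKeys with the region's comparator:

import java.util.Arrays;

/** Illustrative only: the simplified, null-safe row-key equality check. */
public final class RowKeyEquality {
  private RowKeyEquality() {
  }

  // Same shape as the patched expression: handle the both-null case, then let
  // short-circuit evaluation cover either-null and length-mismatch before the
  // byte-by-byte comparison.
  static boolean equalsRowKeys(byte[] rowA, byte[] rowB) {
    return ((rowA == null) && (rowB == null)) ? true :
        (rowA == null) || (rowB == null) || (rowA.length != rowB.length) ? false :
        Arrays.equals(rowA, rowB);
  }

  public static void main(String[] args) {
    System.out.println(equalsRowKeys(null, null));                         // true
    System.out.println(equalsRowKeys(new byte[] { 1 }, null));             // false
    System.out.println(equalsRowKeys(new byte[] { 1 }, new byte[] { 1 })); // true
  }
}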

View File

@ -22,8 +22,10 @@ package org.apache.hadoop.hbase;
/**
* Reports a problem with a lease
*/
@SuppressWarnings("serial")
public class LeaseException extends DoNotRetryIOException {
private static final long serialVersionUID = 8179703995292418650L;
/** default constructor */
public LeaseException() {
super();

View File

@ -47,18 +47,17 @@ public class RegionHistorian implements HConstants {
private HTable metaTable;
private GregorianCalendar cal = new GregorianCalendar();
/** Singleton reference */
private static RegionHistorian historian;
/** Date formater for the timestamp in RegionHistoryInformation */
private static SimpleDateFormat dateFormat = new SimpleDateFormat(
static SimpleDateFormat dateFormat = new SimpleDateFormat(
"EEE, d MMM yyyy HH:mm:ss");
//TODO: Why is this public? Appears to only apply internally.
public static enum HistorianColumnKey {
private static enum HistorianColumnKey {
REGION_CREATION ( Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR+"creation")),
REGION_OPEN ( Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR+"open")),
REGION_SPLIT ( Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR+"split")),
@ -66,13 +65,13 @@ public class RegionHistorian implements HConstants {
REGION_FLUSH ( Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR+"flush")),
REGION_ASSIGNMENT ( Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR+"assignment"));
public byte[] key;
byte[] key;
HistorianColumnKey(byte[] key) {
this.key = key;
}
}
public static final String SPLIT_PREFIX = "Region split from: ";
/**
@ -199,8 +198,7 @@ public class RegionHistorian implements HConstants {
* @param info
* @param timeTaken
*/
public void addRegionFlush(HRegionInfo info,
@SuppressWarnings("unused") String timeTaken) {
public void addRegionFlush(HRegionInfo info, String timeTaken) {
// Disabled. Noop. If this regionserver is hosting the .META. AND is
// holding the reclaimMemcacheMemory global lock --
// see Flusher#flushSomeRegions -- we deadlock. For now, just disable
@ -249,6 +247,8 @@ public class RegionHistorian implements HConstants {
*/
public class RegionHistoryInformation implements
Comparable<RegionHistoryInformation> {
private GregorianCalendar cal = new GregorianCalendar();
private long timestamp;
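
The RegionHistorian hunk adjusts member visibility (the HistorianColumnKey enum and its key field lose their public modifiers, while the shared SimpleDateFormat goes from private to package-private) and drops a no-longer-needed @SuppressWarnings("unused") from the addRegionFlush parameter. For reference, a tiny sketch of the enum-with-byte[]-payload shape the class uses, with invented names and key strings:

import java.nio.charset.StandardCharsets;

/** Illustrative only: enum constants carrying a byte[] column key. */
class HistoryColumns {
  private static enum ColumnKey {
    CREATION("history:creation"),
    OPEN("history:open"),
    SPLIT("history:split");

    // No longer public; with the enum itself private, only the enclosing class reads these keys.
    final byte[] key;

    ColumnKey(String columnName) {
      this.key = columnName.getBytes(StandardCharsets.UTF_8);
    }
  }
}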

View File

@ -80,16 +80,15 @@ public class RemoteExceptionHandler {
* exception is not an IOException. The decoded exception is set as
* the cause.
*/
@SuppressWarnings("unchecked")
public static IOException decodeRemoteException(final RemoteException re)
throws IOException {
IOException i = re;
try {
Class c = Class.forName(re.getClassName());
Class<?> c = Class.forName(re.getClassName());
Class[] parameterTypes = { String.class };
Constructor ctor = c.getConstructor(parameterTypes);
Class<?>[] parameterTypes = { String.class };
Constructor<?> ctor = c.getConstructor(parameterTypes);
Object[] arguments = { re.getMessage() };
Throwable t = (Throwable) ctor.newInstance(arguments);
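
The RemoteExceptionHandler hunk swaps the raw Class and Constructor types for their wildcard-parameterized forms, which is why the @SuppressWarnings("unchecked") annotation on the method is no longer needed. A self-contained sketch of the same reflective reconstruction; the class name, method name and message are placeholders:

import java.lang.reflect.Constructor;

/** Illustrative only: rebuilding a Throwable reflectively with Class<?> and Constructor<?>. */
public class ReflectiveRebuild {
  static Throwable instantiate(String className, String message) throws Exception {
    // Wildcarded types: no raw-type or unchecked warnings around getConstructor/newInstance.
    Class<?> c = Class.forName(className);
    Class<?>[] parameterTypes = { String.class };
    Constructor<?> ctor = c.getConstructor(parameterTypes);
    Object[] arguments = { message };
    return (Throwable) ctor.newInstance(arguments);
  }

  public static void main(String[] args) throws Exception {
    System.out.println(instantiate("java.io.IOException", "remote side failed"));
  }
}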

View File

@ -22,9 +22,10 @@ package org.apache.hadoop.hbase;
/**
* Thrown when a value is longer than the specified LENGTH
*/
@SuppressWarnings("serial")
public class ValueOverMaxLengthException extends DoNotRetryIOException {
private static final long serialVersionUID = -5525656352372008316L;
/**
* default constructor
*/