HBASE-9036 Few small code cleanup (Jean-Marc)

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1506669 13f79535-47bb-0310-9956-ffa450edef68
Zhihong Yu 2013-07-24 19:37:06 +00:00
parent e5a3842a12
commit 6ddd020445
12 changed files with 203 additions and 199 deletions

ServerName.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase;
import com.google.common.net.InetAddresses;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
@@ -28,6 +29,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServ
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;
@@ -51,7 +53,7 @@ import java.util.regex.Pattern;
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class ServerName implements Comparable<ServerName> {
public class ServerName implements Comparable<ServerName>, Serializable {
/**
* Version for this class.
* Its a short rather than a byte so I can for sure distinguish between this
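Note: the hunk above makes ServerName serializable via java.io.Serializable. A minimal sketch of what that enables — round-tripping an instance through standard Java object streams. The constructor call is illustrative only (hypothetical usage); substitute whatever constructor or factory your HBase version exposes:

  import java.io.*;
  import org.apache.hadoop.hbase.ServerName;

  public class ServerNameSerDemo {
    public static void main(String[] args) throws Exception {
      // Hypothetical construction; any ServerName instance works the same way.
      ServerName sn = new ServerName("example.org", 60020, 1234567890L);
      ByteArrayOutputStream bos = new ByteArrayOutputStream();
      ObjectOutputStream oos = new ObjectOutputStream(bos);
      oos.writeObject(sn); // possible only now that ServerName implements Serializable
      oos.close();
      ObjectInputStream ois =
          new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()));
      ServerName copy = (ServerName) ois.readObject();
      ois.close();
      System.out.println(copy); // same host/port/startcode as the original
    }
  }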

TestOperation.java

@@ -218,7 +218,7 @@ public class TestOperation {
Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, CR_FILTER, COL_PRE_FILTER,
CCG_FILTER, CP_FILTER, PREFIX_FILTER, PAGE_FILTER));
private static String STR_L_FILTER_LIST = String.format(
"%s AND (5/8): [%s, %s, %s, %s, %s]",
"%s AND (5/8): [%s, %s, %s, %s, %s, %s]",
L_FILTER_LIST.getClass().getSimpleName(), STR_TS_FILTER, STR_L_TS_FILTER,
STR_CR_FILTER, STR_COL_PRE_FILTER, STR_CCG_FILTER, STR_CP_FILTER);
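Note: the extra %s matters because Java's String.format silently ignores surplus arguments, so the old five-placeholder template compiled and ran but dropped the last filter from the expected string. A small illustration:

  public class FormatArgsDemo {
    public static void main(String[] args) {
      // Surplus format arguments are discarded without any exception.
      String s = String.format("[%s, %s]", "a", "b", "c");
      System.out.println(s); // prints [a, b] -- "c" never appears
    }
  }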

ClassFinder.java

@@ -133,51 +133,53 @@ public class ClassFinder {
try {
jarFile = new JarInputStream(new FileInputStream(jarFileName));
} catch (IOException ioEx) {
if (!proceedOnExceptions) {
throw ioEx;
}
LOG.error("Failed to look for classes in " + jarFileName + ": " + ioEx);
throw ioEx;
}
Set<Class<?>> classes = new HashSet<Class<?>>();
JarEntry entry = null;
while (true) {
try {
entry = jarFile.getNextJarEntry();
} catch (IOException ioEx) {
if (!proceedOnExceptions) {
throw ioEx;
try {
while (true) {
try {
entry = jarFile.getNextJarEntry();
} catch (IOException ioEx) {
if (!proceedOnExceptions) {
throw ioEx;
}
LOG.error("Failed to get next entry from " + jarFileName + ": " + ioEx);
break;
}
if (entry == null) {
break; // loop termination condition
}
LOG.error("Failed to get next entry from " + jarFileName + ": " + ioEx);
break;
}
if (entry == null) {
break; // loop termination condition
}
String className = entry.getName();
if (!className.endsWith(CLASS_EXT)) {
continue;
}
int ix = className.lastIndexOf('/');
String fileName = (ix >= 0) ? className.substring(ix + 1) : className;
if (null != this.fileNameFilter
&& !this.fileNameFilter.isCandidateFile(fileName, className)) {
continue;
}
className = className
.substring(0, className.length() - CLASS_EXT.length()).replace('/', '.');
if (!className.startsWith(packageName)) {
continue;
}
Class<?> c = makeClass(className, proceedOnExceptions);
if (c != null) {
if (!classes.add(c)) {
LOG.error("Ignoring duplicate class " + className);
String className = entry.getName();
if (!className.endsWith(CLASS_EXT)) {
continue;
}
int ix = className.lastIndexOf('/');
String fileName = (ix >= 0) ? className.substring(ix + 1) : className;
if (null != this.fileNameFilter
&& !this.fileNameFilter.isCandidateFile(fileName, className)) {
continue;
}
className =
className.substring(0, className.length() - CLASS_EXT.length()).replace('/', '.');
if (!className.startsWith(packageName)) {
continue;
}
Class<?> c = makeClass(className, proceedOnExceptions);
if (c != null) {
if (!classes.add(c)) {
LOG.error("Ignoring duplicate class " + className);
}
}
}
return classes;
} finally {
jarFile.close();
}
return classes;
}
private Set<Class<?>> findClassesFromFiles(File baseDirectory, String packageName,
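Note: the ClassFinder hunk above wraps the whole scan loop in try/finally so the jar stream is closed on every exit path (normal return, break, or exception), not only on success. The generic shape of the fix, as a self-contained sketch:

  import java.io.FileInputStream;
  import java.io.IOException;
  import java.util.jar.JarEntry;
  import java.util.jar.JarInputStream;

  public class JarScanSketch {
    static void scanJar(String jarFileName) throws IOException {
      JarInputStream jar = new JarInputStream(new FileInputStream(jarFileName));
      try {
        // All reads happen inside try; a throw from getNextJarEntry still closes the stream.
        for (JarEntry e = jar.getNextJarEntry(); e != null; e = jar.getNextJarEntry()) {
          System.out.println(e.getName()); // stands in for the real per-entry work
        }
      } finally {
        jar.close();
      }
    }
  }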

HFileWriterV2.java

@@ -181,14 +181,15 @@ public class HFileWriterV2 extends AbstractHFileWriter {
byte[] fakeKey = ((KeyComparator) comparator).getShortMidpointKey(
lastKeyOfPreviousBlock, firstKeyInBlock);
if (comparator.compare(fakeKey, firstKeyInBlock) > 0) {
throw new IOException("Unexpected getShortMidpointKey result, fakeKey:" + fakeKey
+ ", firstKeyInBlock:" + firstKeyInBlock);
throw new IOException("Unexpected getShortMidpointKey result, fakeKey:"
+ Bytes.toStringBinary(fakeKey) + ", firstKeyInBlock:"
+ Bytes.toStringBinary(firstKeyInBlock));
}
if (lastKeyOfPreviousBlock != null && comparator.compare(lastKeyOfPreviousBlock,
fakeKey) >= 0) {
throw new IOException("Unexpected getShortMidpointKey result, lastKeyOfPreviousBlock:" +
Bytes.toString(lastKeyOfPreviousBlock) + ", fakeKey:" +
Bytes.toString(fakeKey));
Bytes.toStringBinary(lastKeyOfPreviousBlock) + ", fakeKey:" +
Bytes.toStringBinary(fakeKey));
}
dataBlockIndexWriter.addEntry(fakeKey, lastDataBlockOffset,onDiskSize);
} else {
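Note: concatenating a byte[] into a String goes through Object.toString() and prints an identity hash such as [B@6d06d69c, which is why the exception messages above now route keys through Bytes.toStringBinary; the FavoredNodeAssignmentHelper and ModifyTableHandler hunks below apply the same idea with Bytes.toString. A small demonstration:

  import org.apache.hadoop.hbase.util.Bytes;

  public class ByteArrayLogDemo {
    public static void main(String[] args) {
      byte[] key = Bytes.toBytes("row\u00011");
      System.out.println("fakeKey:" + key);                       // fakeKey:[B@... -- unreadable
      System.out.println("fakeKey:" + Bytes.toStringBinary(key)); // fakeKey:row\x011 -- non-printables escaped
    }
  }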

CellCounter.java

@@ -118,8 +118,7 @@ public class CellCounter {
for (KeyValue value : values.list()) {
currentRowKey = Bytes.toStringBinary(value.getRow());
String thisRowFamilyName = Bytes.toStringBinary(value.getFamily());
if (thisRowFamilyName != null &&
!thisRowFamilyName.equals(currentFamilyName)) {
if (!thisRowFamilyName.equals(currentFamilyName)) {
currentFamilyName = thisRowFamilyName;
context.getCounter("CF", thisRowFamilyName).increment(1);
context.write(new Text("Total Families Across all Rows"),
@@ -128,8 +127,7 @@ public class CellCounter {
}
String thisRowQualifierName = thisRowFamilyName + separator
+ Bytes.toStringBinary(value.getQualifier());
if (thisRowQualifierName != null &&
!thisRowQualifierName.equals(currentQualifierName)) {
if (!thisRowQualifierName.equals(currentQualifierName)) {
currentQualifierName = thisRowQualifierName;
context.getCounter("CFQL", thisRowQualifierName).increment(1);
context.write(new Text("Total Qualifiers across all Rows"),

FavoredNodeAssignmentHelper.java

@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.master.balancer;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -165,7 +166,7 @@ public class FavoredNodeAssignmentHelper {
put.add(HConstants.CATALOG_FAMILY, FAVOREDNODES_QUALIFIER,
EnvironmentEdgeManager.currentTimeMillis(), favoredNodes);
LOG.info("Create the region " + regionInfo.getRegionNameAsString() +
" with favored nodes " + favoredNodes);
" with favored nodes " + Bytes.toString(favoredNodes));
}
return put;
}

ModifyTableHandler.java

@@ -82,7 +82,7 @@ public class ModifyTableHandler extends TableEventHandler {
for (byte[] familyName: oldFamilies) {
if (!newFamilies.contains(familyName)) {
LOG.debug("Removing family=" + Bytes.toString(familyName) +
" from table=" + this.tableName);
" from table=" + Bytes.toString(this.tableName));
for (HRegionInfo hri: hris) {
// Delete the family directory in FS for all the regions one by one
mfs.deleteFamilyFromFS(hri, familyName);

HRegion.java

@@ -3532,10 +3532,8 @@ public class HRegion implements HeapSize { // , Writable{
}
if (region != null && region.metricsRegion != null) {
long totalSize = 0;
if (outResults != null) {
for(KeyValue kv:outResults) {
totalSize += kv.getLength();
}
for(KeyValue kv:outResults) {
totalSize += kv.getLength();
}
region.metricsRegion.updateScanNext(totalSize);
}

RegionScannerHolder.java

@@ -326,7 +326,7 @@ public class RegionScannerHolder {
}
// coprocessor postNext hook
if (region != null && region.getCoprocessorHost() != null) {
if (region.getCoprocessorHost() != null) {
region.getCoprocessorHost().postScannerNext(scanner, results, rows, true);
}
}

StorageClusterStatusModel.java

@@ -96,54 +96,56 @@ import com.google.protobuf.ByteString;
@InterfaceAudience.Private
public class StorageClusterStatusModel
implements Serializable, ProtobufMessageHandler {
private static final long serialVersionUID = 1L;
private static final long serialVersionUID = 1L;
/**
* Represents a region server.
*/
public static class Node {
/**
* Represents a region hosted on a region server.
*/
public static class Region {
private byte[] name;
private int stores;
private int storefiles;
private int storefileSizeMB;
private int memstoreSizeMB;
private int storefileIndexSizeMB;
private long readRequestsCount;
private long writeRequestsCount;
private int rootIndexSizeKB;
private int totalStaticIndexSizeKB;
private int totalStaticBloomSizeKB;
private long totalCompactingKVs;
private long currentCompactedKVs;
/**
* Represents a region server.
*/
public static class Node implements Serializable {
private static final long serialVersionUID = 1L;
/**
* Default constructor
*/
public Region() {}
/**
* Represents a region hosted on a region server.
*/
public static class Region {
private byte[] name;
private int stores;
private int storefiles;
private int storefileSizeMB;
private int memstoreSizeMB;
private int storefileIndexSizeMB;
private long readRequestsCount;
private long writeRequestsCount;
private int rootIndexSizeKB;
private int totalStaticIndexSizeKB;
private int totalStaticBloomSizeKB;
private long totalCompactingKVs;
private long currentCompactedKVs;
/**
* Constructor
* @param name the region name
*/
public Region(byte[] name) {
this.name = name;
}
/**
* Default constructor
*/
public Region() {
}
/**
* Constructor
* @param name the region name
* @param stores the number of stores
* @param storefiles the number of store files
* @param storefileSizeMB total size of store files, in MB
* @param memstoreSizeMB total size of memstore, in MB
* @param storefileIndexSizeMB total size of store file indexes, in MB
*/
public Region(byte[] name, int stores, int storefiles,
/**
* Constructor
* @param name the region name
*/
public Region(byte[] name) {
this.name = name;
}
/**
* Constructor
* @param name the region name
* @param stores the number of stores
* @param storefiles the number of store files
* @param storefileSizeMB total size of store files, in MB
* @param memstoreSizeMB total size of memstore, in MB
* @param storefileIndexSizeMB total size of store file indexes, in MB
*/
public Region(byte[] name, int stores, int storefiles,
int storefileSizeMB, int memstoreSizeMB, int storefileIndexSizeMB,
long readRequestsCount, long writeRequestsCount, int rootIndexSizeKB,
int totalStaticIndexSizeKB, int totalStaticBloomSizeKB,
@@ -164,18 +166,18 @@ public class StorageClusterStatusModel
}
/**
* @return the region name
*/
@XmlAttribute
public byte[] getName() {
return name;
}
* @return the region name
*/
@XmlAttribute
public byte[] getName() {
return name;
}
/**
* @return the number of stores
*/
@XmlAttribute
public int getStores() {
/**
* @return the number of stores
*/
@XmlAttribute
public int getStores() {
return stores;
}
@@ -210,7 +212,7 @@ public class StorageClusterStatusModel
public int getStorefileIndexSizeMB() {
return storefileIndexSizeMB;
}
/**
* @return the current total read requests made to region
*/
@@ -250,7 +252,7 @@ public class StorageClusterStatusModel
public int getTotalStaticBloomSizeKB() {
return totalStaticBloomSizeKB;
}
/**
* @return The total number of compacting key-values
*/
@@ -273,7 +275,7 @@ public class StorageClusterStatusModel
public void setReadRequestsCount(long readRequestsCount) {
this.readRequestsCount = readRequestsCount;
}
/**
* @param rootIndexSizeKB The current total size of root-level indexes
* for the region, in KB
@@ -281,14 +283,14 @@ public class StorageClusterStatusModel
public void setRootIndexSizeKB(int rootIndexSizeKB) {
this.rootIndexSizeKB = rootIndexSizeKB;
}
/**
* @param writeRequestsCount The current total write requests made to region
*/
public void setWriteRequestsCount(long writeRequestsCount) {
this.writeRequestsCount = writeRequestsCount;
}
/**
* @param currentCompactedKVs The completed count of key values
* in currently running compaction
@@ -296,7 +298,7 @@ public class StorageClusterStatusModel
public void setCurrentCompactedKVs(long currentCompactedKVs) {
this.currentCompactedKVs = currentCompactedKVs;
}
/**
* @param totalCompactingKVs The total compacting key values
* in currently running compaction
@@ -304,7 +306,7 @@ public class StorageClusterStatusModel
public void setTotalCompactingKVs(long totalCompactingKVs) {
this.totalCompactingKVs = totalCompactingKVs;
}
/**
* @param totalStaticBloomSizeKB The total size of all Bloom filter blocks,
* not just loaded into the block cache, in KB.
@@ -312,7 +314,7 @@ public class StorageClusterStatusModel
public void setTotalStaticBloomSizeKB(int totalStaticBloomSizeKB) {
this.totalStaticBloomSizeKB = totalStaticBloomSizeKB;
}
/**
* @param totalStaticIndexSizeKB The total size of all index blocks,
* not just the root level, in KB.
@@ -320,17 +322,17 @@ public class StorageClusterStatusModel
public void setTotalStaticIndexSizeKB(int totalStaticIndexSizeKB) {
this.totalStaticIndexSizeKB = totalStaticIndexSizeKB;
}
/**
* @param name the region name
*/
public void setName(byte[] name) {
this.name = name;
}
/**
* @param stores the number of stores
*/
/**
* @param name the region name
*/
public void setName(byte[] name) {
this.name = name;
}
/**
* @param stores the number of stores
*/
public void setStores(int stores) {
this.stores = stores;
}
@@ -362,9 +364,9 @@ public class StorageClusterStatusModel
public void setStorefileIndexSizeMB(int storefileIndexSizeMB) {
this.storefileIndexSizeMB = storefileIndexSizeMB;
}
}
}
private String name;
private String name;
private long startCode;
private int requests;
private int heapSizeMB;
@@ -499,73 +501,73 @@ public class StorageClusterStatusModel
public void setRequests(int requests) {
this.requests = requests;
}
}
}
private List<Node> liveNodes = new ArrayList<Node>();
private List<String> deadNodes = new ArrayList<String>();
private int regions;
private int requests;
private double averageLoad;
private List<Node> liveNodes = new ArrayList<Node>();
private List<String> deadNodes = new ArrayList<String>();
private int regions;
private int requests;
private double averageLoad;
/**
* Add a live node to the cluster representation.
* @param name the region server name
* @param startCode the region server's start code
* @param heapSizeMB the current heap size, in MB
* @param maxHeapSizeMB the maximum heap size, in MB
*/
public Node addLiveNode(String name, long startCode, int heapSizeMB,
int maxHeapSizeMB) {
Node node = new Node(name, startCode);
node.setHeapSizeMB(heapSizeMB);
node.setMaxHeapSizeMB(maxHeapSizeMB);
liveNodes.add(node);
return node;
}
/**
* Add a live node to the cluster representation.
* @param name the region server name
* @param startCode the region server's start code
* @param heapSizeMB the current heap size, in MB
* @param maxHeapSizeMB the maximum heap size, in MB
*/
public Node addLiveNode(String name, long startCode, int heapSizeMB, int maxHeapSizeMB) {
Node node = new Node(name, startCode);
node.setHeapSizeMB(heapSizeMB);
node.setMaxHeapSizeMB(maxHeapSizeMB);
liveNodes.add(node);
return node;
}
/**
* @param index the index
* @return the region server model
*/
public Node getLiveNode(int index) {
return liveNodes.get(index);
}
/**
* @param index the index
* @return the region server model
*/
public Node getLiveNode(int index) {
return liveNodes.get(index);
}
/**
* Add a dead node to the cluster representation.
* @param node the dead region server's name
*/
public void addDeadNode(String node) {
deadNodes.add(node);
}
/**
* @param index the index
* @return the dead region server's name
*/
public String getDeadNode(int index) {
return deadNodes.get(index);
}
/**
* Add a dead node to the cluster representation.
* @param node the dead region server's name
*/
public void addDeadNode(String node) {
deadNodes.add(node);
}
/**
* Default constructor
*/
public StorageClusterStatusModel() {}
/**
* @param index the index
* @return the dead region server's name
*/
public String getDeadNode(int index) {
return deadNodes.get(index);
}
/**
* @return the list of live nodes
*/
@XmlElement(name="Node")
@XmlElementWrapper(name="LiveNodes")
public List<Node> getLiveNodes() {
return liveNodes;
}
/**
* Default constructor
*/
public StorageClusterStatusModel() {
}
/**
* @return the list of dead nodes
*/
@XmlElement(name="Node")
@XmlElementWrapper(name="DeadNodes")
/**
* @return the list of live nodes
*/
@XmlElement(name = "Node")
@XmlElementWrapper(name = "LiveNodes")
public List<Node> getLiveNodes() {
return liveNodes;
}
/**
* @return the list of dead nodes
*/
@XmlElement(name = "Node")
@XmlElementWrapper(name = "DeadNodes")
public List<String> getDeadNodes() {
return deadNodes;
}
@@ -631,13 +633,14 @@ public class StorageClusterStatusModel
this.averageLoad = averageLoad;
}
/* (non-Javadoc)
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(String.format("%d live servers, %d dead servers, " +
/*
* (non-Javadoc)
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(String.format("%d live servers, %d dead servers, " +
"%.4f average load%n%n", liveNodes.size(), deadNodes.size(),
averageLoad));
if (!liveNodes.isEmpty()) {
@@ -699,9 +702,9 @@ public class StorageClusterStatusModel
sb.append('\n');
}
}
return sb.toString();
}
return sb.toString();
}
@Override
public byte[] createProtobufOutput() {
StorageClusterStatus.Builder builder = StorageClusterStatus.newBuilder();
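Note: the StorageClusterStatusModel hunks are mostly re-indentation, but the substantive change is that the nested Node class now implements Serializable and declares its own serialVersionUID. That is needed because serializing the outer model recursively serializes every non-transient field, and a non-serializable element type fails at runtime. A minimal sketch of the constraint, with hypothetical class names:

  import java.io.Serializable;
  import java.util.ArrayList;
  import java.util.List;

  class OuterModel implements Serializable {
    private static final long serialVersionUID = 1L;

    // Without "implements Serializable" here, writeObject on an OuterModel
    // holding Inner instances throws java.io.NotSerializableException.
    static class Inner implements Serializable {
      private static final long serialVersionUID = 1L;
    }

    private List<Inner> liveNodes = new ArrayList<Inner>();
  }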

AccessController.java

@@ -1093,7 +1093,6 @@ public class AccessController extends BaseRegionObserver
@Override
public void preBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
List<Pair<byte[], String>> familyPaths) throws IOException {
List<byte[]> cfs = new LinkedList<byte[]>();
for(Pair<byte[],String> el : familyPaths) {
requirePermission("preBulkLoadHFile",
ctx.getEnvironment().getRegion().getTableDesc().getName(),

HBaseFsck.java

@@ -3235,7 +3235,7 @@ public class HBaseFsck extends Configured implements Tool {
* Display the full report from fsck. This displays all live and dead region
* servers, and all known regions.
*/
public void setDisplayFullReport() {
public static void setDisplayFullReport() {
details = true;
}
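Note: setDisplayFullReport only mutates the static field details, so making it static gives it an honest signature — callers can write HBaseFsck.setDisplayFullReport() without constructing a tool instance. The pattern in miniature, with hypothetical names:

  class FsckSketch {
    private static boolean details = false;

    // Static because it touches only static state; no instance is needed.
    public static void setDisplayFullReport() {
      details = true;
    }
  }
  // usage: FsckSketch.setDisplayFullReport();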