HBASE-1262 Eclipse warnings, including performance related things like synthetic accessors
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@755878 13f79535-47bb-0310-9956-ffa450edef68
parent d8bf82b984
commit bfda6ae20e
@@ -105,6 +105,8 @@ Release 0.20.0 - Unreleased
               (Ryan Rawson via Stack)
   HBASE-1265  HLogEdit static constants should be final (Nitay Joffe via Stack)
   HBASE-1244  ZooKeeperWrapper constants cleanup (Nitay Joffe via Stack)
   HBASE-1262  Eclipse warnings, including performance related things like
               synthetic accessors (Nitay Joffe via Stack)

Release 0.19.0 - 01/21/2009

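For readers unfamiliar with the "synthetic accessors" warning the changelog entry refers to: when an inner class touches a private member of its enclosing class, javac generates a hidden bridge method for the access. The sketch below is hypothetical (not taken from this commit) and only illustrates the pattern and the usual fix that Eclipse suggests.

public class SyntheticAccessorExample {
  private int counter;        // private + touched from an inner class => synthetic accessor

  // Before: this anonymous Runnable forces javac to emit a hidden access$... method.
  Runnable slow = new Runnable() {
    public void run() {
      counter++;              // compiles to a call through the generated accessor
    }
  };

  int visibleCounter;         // package-private field needs no accessor

  // After: widening the member (or making the nested type static and passing state in)
  // lets the inner class touch it directly.
  Runnable fast = new Runnable() {
    public void run() {
      visibleCounter++;
    }
  };
}
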
@@ -52,6 +52,9 @@ public abstract class Chore extends Thread {
    this.stop = s;
  }

  /**
   * @see java.lang.Thread#run()
   */
  @Override
  public void run() {
    try {

@@ -61,6 +61,7 @@ public class HColumnDescriptor implements ISerializable, WritableComparable<HColumnDescriptor>
   * @see org.apache.hadoop.io.SequenceFile.Writer
   * @deprecated Replaced by {@link Compression.Algorithm}.
   */
  @Deprecated
  public static enum CompressionType {
    /** Do not compress records. */
    NONE,

@@ -95,6 +96,7 @@ public class HColumnDescriptor implements ISerializable, WritableComparable<HColumnDescriptor>
   * Default maximum cell length.
   */
  public static final int DEFAULT_LENGTH = Integer.MAX_VALUE;
  /** Default maximum cell length as an Integer. */
  public static final Integer DEFAULT_LENGTH_INTEGER =
    Integer.valueOf(DEFAULT_LENGTH);

@@ -226,6 +228,7 @@ public class HColumnDescriptor implements ISerializable, WritableComparable<HColumnDescriptor>
   * @param inMemory If true, column data should be kept in an HRegionServer's
   * cache
   * @param blockCacheEnabled If true, MapFile blocks should be cached
   * @param blocksize
   * @param maxValueLength Restrict values to <= this value
   * @param timeToLive Time-to-live of cell contents, in seconds
   * (use HConstants.FOREVER for unlimited TTL)

@@ -522,6 +525,9 @@ public class HColumnDescriptor implements ISerializable, WritableComparable<HColumnDescriptor>
    setValue(MAPFILE_INDEX_INTERVAL, Integer.toString(interval));
  }

  /**
   * @see java.lang.Object#toString()
   */
  @Override
  public String toString() {
    StringBuffer s = new StringBuffer();

@@ -541,12 +547,27 @@ public class HColumnDescriptor implements ISerializable, WritableComparable<HColumnDescriptor>
    s.append('}');
    return s.toString();
  }

  /**
   * @see java.lang.Object#equals(java.lang.Object)
   */
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null) {
      return false;
    }
    if (!(obj instanceof HColumnDescriptor)) {
      return false;
    }
    return compareTo((HColumnDescriptor)obj) == 0;
  }

  /**
   * @see java.lang.Object#hashCode()
   */
  @Override
  public int hashCode() {
    int result = Bytes.hashCode(this.name);

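Several hunks in this commit add paired equals()/hashCode()/toString() overrides (HColumnDescriptor above, HMsg, HRegionInfo, HRegionLocation and others below). A small illustrative sketch, not from the HBase source and with invented names, of why the two methods are always added together:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

// Hypothetical value class keyed on a byte[] name.
class ColumnId {
  private final byte[] name;

  ColumnId(byte[] name) { this.name = name; }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) return true;
    if (!(obj instanceof ColumnId)) return false;
    return Arrays.equals(this.name, ((ColumnId) obj).name);
  }

  // Without this override, two equal ColumnIds would land in different HashSet
  // buckets, because Object.hashCode() is identity based.
  @Override
  public int hashCode() {
    return Arrays.hashCode(this.name);
  }

  public static void main(String[] args) {
    Set<ColumnId> set = new HashSet<ColumnId>();
    set.add(new ColumnId("info:regioninfo".getBytes()));
    // Prints true only because hashCode() agrees with equals().
    System.out.println(set.contains(new ColumnId("info:regioninfo".getBytes())));
  }
}
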
@@ -110,7 +110,7 @@ class HMerge implements HConstants {

    this.tabledir = new Path(
        fs.makeQualified(new Path(conf.get(HBASE_DIR))),
        tableName.toString()
        Bytes.toString(tableName)
    );
    Path logdir = new Path(tabledir, "merge_" + System.currentTimeMillis() +
        HREGION_LOGDIR_NAME);

@@ -162,16 +162,16 @@ class HMerge implements HConstants {
      if ((currentSize + nextSize) <= (maxFilesize / 2)) {
        // We merge two adjacent regions if their total size is less than
        // one half of the desired maximum size
        LOG.info("merging regions " + currentRegion.getRegionName()
            + " and " + nextRegion.getRegionName());
        LOG.info("merging regions " + Bytes.toString(currentRegion.getRegionName())
            + " and " + Bytes.toString(nextRegion.getRegionName()));
        HRegion mergedRegion =
            HRegion.mergeAdjacent(currentRegion, nextRegion);
        updateMeta(currentRegion.getRegionName(), nextRegion.getRegionName(),
            mergedRegion);
        break;
      }
      LOG.info("not merging regions " + currentRegion.getRegionName()
          + " and " + nextRegion.getRegionName());
      LOG.info("not merging regions " + Bytes.toString(currentRegion.getRegionName())
          + " and " + Bytes.toString(nextRegion.getRegionName()));
      currentRegion.close();
      currentRegion = nextRegion;
      currentSize = nextSize;

@@ -216,7 +216,7 @@ class HMerge implements HConstants {
      Cell regionInfo = results.get(COL_REGIONINFO);
      if (regionInfo == null || regionInfo.getValue().length == 0) {
        throw new NoSuchElementException("meta region entry missing " +
            COL_REGIONINFO);
            Bytes.toString(COL_REGIONINFO));
      }
      HRegionInfo region = Writables.getHRegionInfo(regionInfo.getValue());
      if (!Bytes.equals(region.getTableDesc().getName(), this.tableName)) {

@@ -249,7 +249,7 @@ class HMerge implements HConstants {
      RowResult currentRow = metaScanner.next();
      boolean foundResult = false;
      while (currentRow != null) {
        LOG.info("Row: <" + currentRow.getRow() + ">");
        LOG.info("Row: <" + Bytes.toString(currentRow.getRow()) + ">");
        Cell regionInfo = currentRow.get(COL_REGIONINFO);
        if (regionInfo == null || regionInfo.getValue().length == 0) {
          currentRow = metaScanner.next();

@@ -289,7 +289,7 @@ class HMerge implements HConstants {
        }
        table.deleteAll(regionsToDelete[r]);
        if(LOG.isDebugEnabled()) {
          LOG.debug("updated columns in row: " + regionsToDelete[r]);
          LOG.debug("updated columns in row: " + Bytes.toString(regionsToDelete[r]));
        }
      }
      newRegion.getRegionInfo().setOffline(true);

@@ -301,7 +301,7 @@ class HMerge implements HConstants {

      if(LOG.isDebugEnabled()) {
        LOG.debug("updated columns in row: "
            + newRegion.getRegionName());
            + Bytes.toString(newRegion.getRegionName()));
      }
    }
  }

@@ -378,7 +378,7 @@ class HMerge implements HConstants {
      root.batchUpdate(b,null);

      if(LOG.isDebugEnabled()) {
        LOG.debug("updated columns in row: " + regionsToDelete[r]);
        LOG.debug("updated columns in row: " + Bytes.toString(regionsToDelete[r]));
      }
    }
    HRegionInfo newInfo = newRegion.getRegionInfo();

@@ -387,7 +387,7 @@ class HMerge implements HConstants {
    b.put(COL_REGIONINFO, Writables.getBytes(newInfo));
    root.batchUpdate(b,null);
    if(LOG.isDebugEnabled()) {
      LOG.debug("updated columns in row: " + newRegion.getRegionName());
      LOG.debug("updated columns in row: " + Bytes.toString(newRegion.getRegionName()));
    }
  }
}

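The HMerge hunks above all wrap byte[] row and region names in Bytes.toString() before logging. Concatenating a byte[] into a String goes through Object.toString(), which prints a type-and-hash token rather than the contents. A rough, self-contained illustration (using java.nio instead of HBase's Bytes utility):

import java.nio.charset.StandardCharsets;

public class ByteArrayLogging {
  public static void main(String[] args) {
    byte[] regionName = "tablename,startkey,1237142710000".getBytes(StandardCharsets.UTF_8);

    // Prints something like "merging [B@1b6d3586" -- the array's identity, not its contents.
    System.out.println("merging " + regionName);

    // Prints the readable region name; Bytes.toString(b) in HBase does essentially this.
    System.out.println("merging " + new String(regionName, StandardCharsets.UTF_8));
  }
}
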
@@ -215,6 +215,9 @@ public class HMsg implements Writable {
    return this.safeMode;
  }

  /**
   * @see java.lang.Object#toString()
   */
  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();

@@ -230,15 +233,30 @@ public class HMsg implements Writable {
    sb.append(": safeMode=" + safeMode);
    return sb.toString();
  }

  /**
   * @see java.lang.Object#equals(java.lang.Object)
   */
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null) {
      return false;
    }
    if (getClass() != obj.getClass()) {
      return false;
    }
    HMsg that = (HMsg)obj;
    return this.type.equals(that.type) &&
        (this.info != null)? this.info.equals(that.info):
        that.info == null;
  }

  /**
   * @see java.lang.Object#hashCode()
   */
  @Override
  public int hashCode() {
    int result = this.type.hashCode();

@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JenkinsHash;

@@ -34,7 +35,7 @@ import org.apache.hadoop.io.WritableComparable;
 * HRegions' table descriptor, etc.
 */
public class HRegionInfo extends VersionedWritable implements WritableComparable<HRegionInfo>{
  private final byte VERSION = 0;
  private static final byte VERSION = 0;

  /**
   * @param regionName

@@ -70,10 +71,10 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable<HRegionInfo>
  private boolean splitRequest = false;

  private void setHashCode() {
    int result = this.regionName.hashCode();
    int result = Arrays.hashCode(this.regionName);
    result ^= this.regionId;
    result ^= this.startKey.hashCode();
    result ^= this.endKey.hashCode();
    result ^= Arrays.hashCode(this.startKey);
    result ^= Arrays.hashCode(this.endKey);
    result ^= Boolean.valueOf(this.offLine).hashCode();
    result ^= this.tableDesc.hashCode();
    this.hashCode = result;

@@ -351,6 +352,9 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable<HRegionInfo>
    this.offLine = offLine;
  }

  /**
   * @see java.lang.Object#toString()
   */
  @Override
  public String toString() {
    return "REGION => {" + HConstants.NAME + " => '" +

@@ -363,12 +367,27 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable<HRegionInfo>
    (isSplit()? " SPLIT => true,": "") +
    " TABLE => {" + this.tableDesc.toString() + "}";
  }

  /**
   * @see java.lang.Object#equals(java.lang.Object)
   */
  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null) {
      return false;
    }
    if (!(o instanceof HRegionInfo)) {
      return false;
    }
    return this.compareTo((HRegionInfo)o) == 0;
  }

  /**
   * @see java.lang.Object#hashCode()
   */
  @Override
  public int hashCode() {
    return this.hashCode;

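The setHashCode() change above swaps this.regionName.hashCode() for Arrays.hashCode(this.regionName). On an array, hashCode() is inherited from Object and depends on identity, so two HRegionInfo instances built from identical bytes would hash differently. A brief demonstration of the difference, independent of HBase:

import java.util.Arrays;

public class ArrayHashDemo {
  public static void main(String[] args) {
    byte[] a = {1, 2, 3};
    byte[] b = {1, 2, 3};

    // Identity-based: almost certainly prints false for two distinct array objects.
    System.out.println(a.hashCode() == b.hashCode());

    // Content-based: always prints true for equal contents.
    System.out.println(Arrays.hashCode(a) == Arrays.hashCode(b));
  }
}
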
@@ -37,18 +37,36 @@ public class HRegionLocation implements Comparable<HRegionLocation> {
    this.regionInfo = regionInfo;
    this.serverAddress = serverAddress;
  }

  /**
   * @see java.lang.Object#toString()
   */
  @Override
  public String toString() {
    return "address: " + this.serverAddress.toString() + ", regioninfo: " +
        this.regionInfo;
  }

  /**
   * @see java.lang.Object#equals(java.lang.Object)
   */
  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null) {
      return false;
    }
    if (!(o instanceof HRegionLocation)) {
      return false;
    }
    return this.compareTo((HRegionLocation)o) == 0;
  }

  /**
   * @see java.lang.Object#hashCode()
   */
  @Override
  public int hashCode() {
    int result = this.regionInfo.hashCode();

@@ -109,16 +109,34 @@ public class HServerAddress implements WritableComparable<HServerAddress> {
    return address;
  }

  /**
   * @see java.lang.Object#toString()
   */
  @Override
  public String toString() {
    return (stringValue == null ? "" : stringValue);
  }

  /**
   * @see java.lang.Object#equals(java.lang.Object)
   */
  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null) {
      return false;
    }
    if (getClass() != o.getClass()) {
      return false;
    }
    return this.compareTo((HServerAddress)o) == 0;
  }

  /**
   * @see java.lang.Object#hashCode()
   */
  @Override
  public int hashCode() {
    int result = this.address.hashCode();

@@ -85,7 +85,7 @@ public class HServerInfo implements WritableComparable<HServerInfo> {
  }

  /** @return the server address */
  public HServerAddress getServerAddress() {
  public synchronized HServerAddress getServerAddress() {
    return new HServerAddress(serverAddress);
  }

@@ -99,7 +99,7 @@ public class HServerInfo implements WritableComparable<HServerInfo> {
  }

  /** @return the server start code */
  public long getStartCode() {
  public synchronized long getStartCode() {
    return startCode;
  }

@@ -128,17 +128,35 @@ public class HServerInfo implements WritableComparable<HServerInfo> {
    return this.serverName;
  }

  /**
   * @see java.lang.Object#toString()
   */
  @Override
  public String toString() {
    return "address: " + this.serverAddress + ", startcode: " + this.startCode
        + ", load: (" + this.load.toString() + ")";
  }

  /**
   * @see java.lang.Object#equals(java.lang.Object)
   */
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null) {
      return false;
    }
    if (getClass() != obj.getClass()) {
      return false;
    }
    return compareTo((HServerInfo)obj) == 0;
  }

  /**
   * @see java.lang.Object#hashCode()
   */
  @Override
  public int hashCode() {
    return this.getServerName().hashCode();

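getServerAddress() and getStartCode() above become synchronized, presumably so that reads and writes of the underlying fields go through the same monitor (other accessors in this class were already synchronized). A generic sketch of that pattern, with hypothetical names and not taken from the HBase source:

// Minimal holder whose mutable state is always read and written under the same lock.
class StartCodeHolder {
  private long startCode;

  public synchronized void setStartCode(long startCode) {
    this.startCode = startCode;
  }

  // Without 'synchronized' here, a reader thread may see a stale or torn value of the
  // long field; guarding the getter with the same monitor guarantees it sees the latest write.
  public synchronized long getStartCode() {
    return startCode;
  }
}
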
@@ -180,6 +180,9 @@ public class HServerLoad implements WritableComparable<HServerLoad> {
    out.writeInt(storefileIndexSizeMB);
  }

  /**
   * @see java.lang.Object#toString()
   */
  @Override
  public String toString() {
    StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "stores",

@@ -249,7 +252,10 @@ public class HServerLoad implements WritableComparable<HServerLoad> {
    // return load;
    return numberOfRegions;
  }

  /**
   * @see java.lang.Object#toString()
   */
  @Override
  public String toString() {
    return toString(1);

@@ -273,11 +279,26 @@ public class HServerLoad implements WritableComparable<HServerLoad> {
    return sb.toString();
  }

  /**
   * @see java.lang.Object#equals(java.lang.Object)
   */
  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null) {
      return false;
    }
    if (getClass() != o.getClass()) {
      return false;
    }
    return compareTo((HServerLoad)o) == 0;
  }

  /**
   * @see java.lang.Object#hashCode()
   */
  @Override
  public int hashCode() {
    int result = Integer.valueOf(numberOfRequests).hashCode();

@@ -258,15 +258,30 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
      Bytes.compareTo(getColumn(), 0, delimiterIndex, that.getColumn(), 0,
        delimiterIndex) == 0;
  }

  /**
   * @see java.lang.Object#toString()
   */
  @Override
  public String toString() {
    return Bytes.toString(this.row) + "/" + Bytes.toString(this.column) + "/" +
      timestamp;
  }

  /**
   * @see java.lang.Object#equals(java.lang.Object)
   */
  @Override
  public boolean equals(final Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null) {
      return false;
    }
    if (getClass() != obj.getClass()) {
      return false;
    }
    final HStoreKey other = (HStoreKey)obj;
    // Do a quick check.
    if (this.row.length != other.row.length ||

@@ -277,6 +292,10 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
    return compareTo(other) == 0;
  }

  /**
   * @see java.lang.Object#hashCode()
   */
  @Override
  public int hashCode() {
    int c = Bytes.hashCode(getRow());
    c ^= Bytes.hashCode(getColumn());

@@ -287,8 +306,11 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
  // Comparable

  /**
   * @param o
   * @return int
   * @deprecated Use Comparators instead. This can give wrong results.
   */
  @Deprecated
  public int compareTo(final HStoreKey o) {
    return compareTo(this, o);
  }

@@ -300,6 +322,7 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
   * @deprecated Use Comparators instead. This can give wrong results because
   * does not take into account special handling needed for meta and root rows.
   */
  @Deprecated
  static int compareTo(final HStoreKey left, final HStoreKey right) {
    // We can be passed null
    if (left == null && right == null) return 0;

@@ -412,7 +435,7 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
    final byte [][] result = new byte [2][];
    final int index = getFamilyDelimiterIndex(c);
    if (index == -1) {
      throw new ColumnNameParseException("Impossible column name: " + c);
      throw new ColumnNameParseException("Impossible column name: " + Bytes.toString(c));
    }
    result[0] = new byte [index];
    System.arraycopy(c, 0, result[0], 0, index);

@@ -429,7 +452,7 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
   * buffer.
   */
  public static int getFamilyDelimiterIndex(final byte [] b) {
    return getDelimiter(b, 0, b.length, (int)COLUMN_FAMILY_DELIMITER);
    return getDelimiter(b, 0, b.length, COLUMN_FAMILY_DELIMITER);
  }

  private static int getRequiredDelimiterInReverse(final byte [] b,

@@ -735,6 +758,7 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
   * Use this comparing keys in the -ROOT_ table.
   */
  public static class HStoreKeyRootComparator extends HStoreKeyMetaComparator {
    @Override
    protected int compareRows(byte [] left, int loffset, int llength,
        byte [] right, int roffset, int rlength) {
      return compareRootRows(left, loffset, llength, right, roffset, rlength);

@@ -746,6 +770,7 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
   * Use this comprator for keys in the .META. table.
   */
  public static class HStoreKeyMetaComparator extends HStoreKeyComparator {
    @Override
    protected int compareRows(byte [] left, int loffset, int llength,
        byte [] right, int roffset, int rlength) {
      return compareMetaRows(left, loffset, llength, right, roffset, rlength);

@@ -760,6 +785,7 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
      super(HStoreKey.class);
    }

    @Override
    @SuppressWarnings("unchecked")
    public int compare(final WritableComparable l,
        final WritableComparable r) {

@@ -807,6 +833,9 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
   */
  public static class RootStoreKeyComparator
      extends MetaStoreKeyComparator {
    private static final long serialVersionUID = 1L;

    @Override
    public int compareRows(byte [] left, int loffset, int llength,
        byte [] right, int roffset, int rlength) {
      return compareRootRows(left, loffset, llength, right, roffset, rlength);

@@ -817,6 +846,7 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
   * StoreKeyComparator for the .META. table.
   */
  public static class MetaStoreKeyComparator extends StoreKeyComparator {
    @Override
    public int compareRows(byte [] left, int loffset, int llength,
        byte [] right, int roffset, int rlength) {
      return compareMetaRows(left, loffset, llength, right, roffset, rlength);

@@ -1069,8 +1099,8 @@ public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
   * @return Compatible raw comparator
   */
  public static StoreKeyComparator getRawComparator(final HRegionInfo hri) {
    return hri.isRootRegion()? ROOT_COMPARATOR:
      hri.isMetaRegion()? META_COMPARATOR: META_COMPARATOR;
    return hri.isRootRegion() ? ROOT_COMPARATOR :
      hri.isMetaRegion() ? META_COMPARATOR : PLAIN_COMPARATOR;
  }

  /**

@@ -234,9 +234,9 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor>, ISerializable {

  private synchronized Boolean calculateIsMetaRegion() {
    byte [] value = getValue(IS_META_KEY);
    return (value != null)? new Boolean(Bytes.toString(value)): Boolean.FALSE;
    return (value != null)? Boolean.valueOf(Bytes.toString(value)): Boolean.FALSE;
  }

  private boolean isSomething(final ImmutableBytesWritable key,
      final boolean valueIfNull) {
    byte [] value = getValue(key);

@@ -513,11 +513,26 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor>, ISerializable {
    return s.toString();
  }

  /**
   * @see java.lang.Object#equals(java.lang.Object)
   */
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null) {
      return false;
    }
    if (!(obj instanceof HTableDescriptor)) {
      return false;
    }
    return compareTo((HTableDescriptor)obj) == 0;
  }

  /**
   * @see java.lang.Object#hashCode()
   */
  @Override
  public int hashCode() {
    int result = Bytes.hashCode(this.name);

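The calculateIsMetaRegion() hunk above replaces new Boolean(...) with Boolean.valueOf(...). Boolean.valueOf returns one of the two cached Boolean.TRUE/FALSE instances instead of allocating a fresh wrapper on every call, which is the performance-flavoured kind of Eclipse warning the commit message mentions. A trivial comparison, not from the HBase source:

public class BooleanValueOfDemo {
  public static void main(String[] args) {
    // Allocates a new object every time (and is deprecated in later JDKs).
    Boolean fresh = new Boolean("true");

    // Reuses the canonical Boolean.TRUE instance.
    Boolean cached = Boolean.valueOf("true");

    System.out.println(fresh == Boolean.TRUE);   // false: distinct instance
    System.out.println(cached == Boolean.TRUE);  // true: shared instance
  }
}
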
@@ -69,7 +69,10 @@ public class Leases extends Thread {
    this.leasePeriod = leasePeriod;
    this.leaseCheckFrequency = leaseCheckFrequency;
  }

  /**
   * @see java.lang.Thread#run()
   */
  @Override
  public void run() {
    while (!stopRequested || (stopRequested && leaseQueue.size() > 0) ) {

@@ -230,6 +233,15 @@ public class Leases extends Thread {

    @Override
    public boolean equals(Object obj) {
      if (this == obj) {
        return true;
      }
      if (obj == null) {
        return false;
      }
      if (getClass() != obj.getClass()) {
        return false;
      }
      return this.hashCode() == ((Lease) obj).hashCode();
    }

@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.RegionException;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.RowResult;

@@ -122,14 +122,14 @@ public class HConnectionManager implements HConstants {
  private final int numRetries;
  private final int maxRPCAttempts;

  private final Integer masterLock = new Integer(0);
  private final Object masterLock = new Object();
  private volatile boolean closed;
  private volatile HMasterInterface master;
  private volatile boolean masterChecked;

  private final Integer rootRegionLock = new Integer(0);
  private final Integer metaRegionLock = new Integer(0);
  private final Integer userRegionLock = new Integer(0);
  private final Object rootRegionLock = new Object();
  private final Object metaRegionLock = new Object();
  private final Object userRegionLock = new Object();

  private volatile HBaseConfiguration conf;

@@ -405,7 +405,7 @@ public class HConnectionManager implements HConstants {

    }

    private class HTableDescriptorFinder
    private static class HTableDescriptorFinder
        implements MetaScanner.MetaScannerVisitor {
      byte[] tableName;
      HTableDescriptor result;

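The HConnectionManager hunk replaces lock fields of type Integer (created with new Integer(0)) with plain Object instances. Locking on a dedicated Object is the conventional choice: boxed primitives invite accidental sharing through the Integer cache and autoboxing, in which case unrelated code could end up synchronizing on the same monitor. A minimal sketch of the preferred pattern, with invented names:

// Hypothetical cut-down connection class; only the locking pattern matters here.
class RegionLocator {
  // Risky: small Integer values obtained via autoboxing/Integer.valueOf are cached and
  // shared JVM-wide, so another class could unintentionally lock on the same object.
  // private final Integer rootRegionLock = 0;

  // Preferred: a private monitor object that nothing else can reference.
  private final Object rootRegionLock = new Object();

  private volatile String cachedRootLocation;

  String locateRootRegion() {
    synchronized (rootRegionLock) {
      if (cachedRootLocation == null) {
        cachedRootLocation = "example-host:60020"; // placeholder for the real lookup
      }
      return cachedRootLocation;
    }
  }
}
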
@@ -596,6 +596,7 @@ public class HTable {
   * @param row row key
   * @param columns Array of column names and families you want to retrieve.
   * @param ts timestamp
   * @param numVersions
   * @param rl row lock
   * @return RowResult is empty if row does not exist.
   * @throws IOException

@@ -5,7 +5,6 @@ import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.util.Bytes;

@@ -67,7 +67,10 @@ public class ScannerCallable extends ServerCallable<RowResult[]> {
      instantiated = true;
    }
  }

  /**
   * @see java.util.concurrent.Callable#call()
   */
  public RowResult[] call() throws IOException {
    if (scannerId != -1L && closed) {
      server.close(scannerId);

@@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.ipc.HRegionInterface;

/**
 * Abstract class that implemetns Callable, used by retryable actions.
 * Abstract class that implements Callable, used by retryable actions.
 * @param <T> the class that the ServerCallable handles
 */
public abstract class ServerCallable<T> implements Callable<T> {

@ -15,46 +15,73 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor {
|
|||
super(desc);
|
||||
}
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.hbase.HColumnDescriptor#setValue(byte[], byte[])
|
||||
*/
|
||||
@Override
|
||||
public void setValue(byte[] key, byte[] value) {
|
||||
throw new UnsupportedOperationException("HColumnDescriptor is read-only");
|
||||
}
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.hbase.HColumnDescriptor#setValue(java.lang.String, java.lang.String)
|
||||
*/
|
||||
@Override
|
||||
public void setValue(String key, String value) {
|
||||
throw new UnsupportedOperationException("HColumnDescriptor is read-only");
|
||||
}
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.hbase.HColumnDescriptor#setMaxVersions(int)
|
||||
*/
|
||||
@Override
|
||||
public void setMaxVersions(int maxVersions) {
|
||||
throw new UnsupportedOperationException("HColumnDescriptor is read-only");
|
||||
}
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.hbase.HColumnDescriptor#setInMemory(boolean)
|
||||
*/
|
||||
@Override
|
||||
public void setInMemory(boolean inMemory) {
|
||||
throw new UnsupportedOperationException("HColumnDescriptor is read-only");
|
||||
}
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.hbase.HColumnDescriptor#setBlockCacheEnabled(boolean)
|
||||
*/
|
||||
@Override
|
||||
public void setBlockCacheEnabled(boolean blockCacheEnabled) {
|
||||
throw new UnsupportedOperationException("HColumnDescriptor is read-only");
|
||||
}
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.hbase.HColumnDescriptor#setMaxValueLength(int)
|
||||
*/
|
||||
@Override
|
||||
public void setMaxValueLength(int maxLength) {
|
||||
throw new UnsupportedOperationException("HColumnDescriptor is read-only");
|
||||
}
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.hbase.HColumnDescriptor#setTimeToLive(int)
|
||||
*/
|
||||
@Override
|
||||
public void setTimeToLive(int timeToLive) {
|
||||
throw new UnsupportedOperationException("HColumnDescriptor is read-only");
|
||||
}
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.hbase.HColumnDescriptor#setCompressionType(org.apache.hadoop.hbase.io.hfile.Compression.Algorithm)
|
||||
*/
|
||||
@Override
|
||||
public void setCompressionType(Compression.Algorithm type) {
|
||||
throw new UnsupportedOperationException("HColumnDescriptor is read-only");
|
||||
}
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.hbase.HColumnDescriptor#setMapFileIndexInterval(int)
|
||||
*/
|
||||
@Override
|
||||
public void setMapFileIndexInterval(int interval) {
|
||||
throw new UnsupportedOperationException("HTableDescriptor is read-only");
|
||||
|
|
|
@ -74,36 +74,57 @@ public class UnmodifyableHTableDescriptor extends HTableDescriptor {
|
|||
throw new UnsupportedOperationException("HTableDescriptor is read-only");
|
||||
}
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.hbase.HTableDescriptor#setInMemory(boolean)
|
||||
*/
|
||||
@Override
|
||||
public void setInMemory(boolean inMemory) {
|
||||
throw new UnsupportedOperationException("HTableDescriptor is read-only");
|
||||
}
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.hbase.HTableDescriptor#setReadOnly(boolean)
|
||||
*/
|
||||
@Override
|
||||
public void setReadOnly(boolean readOnly) {
|
||||
throw new UnsupportedOperationException("HTableDescriptor is read-only");
|
||||
}
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.hbase.HTableDescriptor#setValue(byte[], byte[])
|
||||
*/
|
||||
@Override
|
||||
public void setValue(byte[] key, byte[] value) {
|
||||
throw new UnsupportedOperationException("HTableDescriptor is read-only");
|
||||
}
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.hbase.HTableDescriptor#setValue(java.lang.String, java.lang.String)
|
||||
*/
|
||||
@Override
|
||||
public void setValue(String key, String value) {
|
||||
throw new UnsupportedOperationException("HTableDescriptor is read-only");
|
||||
}
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.hbase.HTableDescriptor#setMaxFileSize(long)
|
||||
*/
|
||||
@Override
|
||||
public void setMaxFileSize(long maxFileSize) {
|
||||
throw new UnsupportedOperationException("HTableDescriptor is read-only");
|
||||
}
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.hbase.HTableDescriptor#setMemcacheFlushSize(int)
|
||||
*/
|
||||
@Override
|
||||
public void setMemcacheFlushSize(int memcacheFlushSize) {
|
||||
throw new UnsupportedOperationException("HTableDescriptor is read-only");
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.hbase.HTableDescriptor#addIndex(org.apache.hadoop.hbase.client.tableindexed.IndexSpecification)
|
||||
*/
|
||||
@Override
|
||||
public void addIndex(IndexSpecification index) {
|
||||
throw new UnsupportedOperationException("HTableDescriptor is read-only");
|
||||
|
|
|
@ -46,7 +46,10 @@ public class IndexSpecification implements Writable {
|
|||
// Id of this index, unique within a table.
|
||||
private String indexId;
|
||||
|
||||
/** Construct an "simple" index spec for a single column. */
|
||||
/** Construct an "simple" index spec for a single column.
|
||||
* @param indexId
|
||||
* @param indexedColumn
|
||||
*/
|
||||
public IndexSpecification(String indexId, byte[] indexedColumn) {
|
||||
this(indexId, new byte[][] { indexedColumn }, null,
|
||||
new SimpleIndexKeyGenerator(indexedColumn));
|
||||
|
|
|
@ -42,8 +42,8 @@ import org.apache.hadoop.hbase.util.Bytes;
|
|||
public class IndexedTable extends TransactionalTable {
|
||||
|
||||
// FIXME, these belong elsewhere
|
||||
public static final byte[] INDEX_COL_FAMILY_NAME = Bytes.toBytes("__INDEX__");
|
||||
public static final byte[] INDEX_COL_FAMILY = Bytes.add(
|
||||
static final byte[] INDEX_COL_FAMILY_NAME = Bytes.toBytes("__INDEX__");
|
||||
static final byte[] INDEX_COL_FAMILY = Bytes.add(
|
||||
INDEX_COL_FAMILY_NAME, new byte[] { HStoreKey.COLUMN_FAMILY_DELIMITER });
|
||||
public static final byte[] INDEX_BASE_ROW_COLUMN = Bytes.add(
|
||||
INDEX_COL_FAMILY, Bytes.toBytes("ROW"));
|
||||
|
|
|
@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
|
|||
import org.apache.hadoop.hbase.HStoreKey;
|
||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
import org.apache.hadoop.hbase.MasterNotRunningException;
|
||||
import org.apache.hadoop.hbase.TableExistsException;
|
||||
import org.apache.hadoop.hbase.client.HBaseAdmin;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
|
|
|
@ -67,6 +67,9 @@ public class TransactionState {
|
|||
return transactionId;
|
||||
}
|
||||
|
||||
/**
|
||||
* @see java.lang.Object#toString()
|
||||
*/
|
||||
@Override
|
||||
public String toString() {
|
||||
return "id: " + transactionId + ", particpants: "
|
||||
|
|
|
@ -40,7 +40,10 @@ public class InclusiveStopRowFilter extends StopRowFilter{
|
|||
public InclusiveStopRowFilter(final byte [] stopRowKey) {
|
||||
super(stopRowKey);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.hbase.filter.StopRowFilter#filterRowKey(byte[])
|
||||
*/
|
||||
@Override
|
||||
public boolean filterRowKey(final byte [] rowKey) {
|
||||
if (rowKey == null) {
|
||||
|
|
|
@ -35,6 +35,7 @@ public class PrefixRowFilter implements RowFilterInterface {
|
|||
|
||||
/**
|
||||
* Constructor that takes a row prefix to filter on
|
||||
* @param prefix
|
||||
*/
|
||||
public PrefixRowFilter(byte[] prefix) {
|
||||
this.prefix = prefix;
|
||||
|
|
|
@ -94,6 +94,10 @@ public class StopRowFilter implements RowFilterInterface {
|
|||
/**
|
||||
* Because StopRowFilter does not examine column information, this method
|
||||
* defaults to calling the rowKey-only version of filter.
|
||||
* @param rowKey
|
||||
* @param colKey
|
||||
* @param data
|
||||
* @return boolean
|
||||
*/
|
||||
public boolean filterColumn(final byte [] rowKey, final byte [] colKey,
|
||||
final byte[] data) {
|
||||
|
@ -103,6 +107,8 @@ public class StopRowFilter implements RowFilterInterface {
|
|||
/**
|
||||
* Because StopRowFilter does not examine column information, this method
|
||||
* defaults to calling filterAllRemaining().
|
||||
* @param columns
|
||||
* @return boolean
|
||||
*/
|
||||
public boolean filterRow(final SortedMap<byte [], Cell> columns) {
|
||||
return filterAllRemaining();
|
||||
|
|
|
@ -110,6 +110,9 @@ public class BatchOperation implements Writable, HeapSize {
|
|||
return this.value != null;
|
||||
}
|
||||
|
||||
/**
|
||||
* @see java.lang.Object#toString()
|
||||
*/
|
||||
@Override
|
||||
public String toString() {
|
||||
return "column => " + Bytes.toString(this.column) + ", value => '...'";
|
||||
|
|
|
@ -299,7 +299,10 @@ implements WritableComparable<BatchUpdate>, Iterable<BatchOperation>, HeapSize {
|
|||
public Iterator<BatchOperation> iterator() {
|
||||
return operations.iterator();
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @see java.lang.Object#toString()
|
||||
*/
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
|
|
|
@ -122,16 +122,25 @@ public class BlockFSInputStream extends FSInputStream {
|
|||
}, 1, 1, TimeUnit.SECONDS);
|
||||
}
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.fs.FSInputStream#getPos()
|
||||
*/
|
||||
@Override
|
||||
public synchronized long getPos() {
|
||||
return pos;
|
||||
}
|
||||
|
||||
/**
|
||||
* @see java.io.InputStream#available()
|
||||
*/
|
||||
@Override
|
||||
public synchronized int available() {
|
||||
return (int) (fileLength - pos);
|
||||
}
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.fs.FSInputStream#seek(long)
|
||||
*/
|
||||
@Override
|
||||
public synchronized void seek(long targetPos) throws IOException {
|
||||
if (targetPos > fileLength) {
|
||||
|
@ -141,12 +150,18 @@ public class BlockFSInputStream extends FSInputStream {
|
|||
blockEnd = -1;
|
||||
}
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.fs.FSInputStream#seekToNewSource(long)
|
||||
*/
|
||||
@Override
|
||||
public synchronized boolean seekToNewSource(long targetPos)
|
||||
throws IOException {
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* @see java.io.InputStream#read()
|
||||
*/
|
||||
@Override
|
||||
public synchronized int read() throws IOException {
|
||||
if (closed) {
|
||||
|
@ -165,6 +180,9 @@ public class BlockFSInputStream extends FSInputStream {
|
|||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* @see java.io.InputStream#read(byte[], int, int)
|
||||
*/
|
||||
@Override
|
||||
public synchronized int read(byte buf[], int off, int len) throws IOException {
|
||||
if (closed) {
|
||||
|
@ -206,6 +224,9 @@ public class BlockFSInputStream extends FSInputStream {
|
|||
|
||||
}
|
||||
|
||||
/**
|
||||
* @see java.io.InputStream#close()
|
||||
*/
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
if (closed) {
|
||||
|
@ -234,11 +255,17 @@ public class BlockFSInputStream extends FSInputStream {
|
|||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* @see java.io.InputStream#mark(int)
|
||||
*/
|
||||
@Override
|
||||
public void mark(int readLimit) {
|
||||
// Do nothing
|
||||
}
|
||||
|
||||
/**
|
||||
* @see java.io.InputStream#reset()
|
||||
*/
|
||||
@Override
|
||||
public void reset() throws IOException {
|
||||
throw new IOException("Mark not supported");
|
||||
|
|
|
@ -45,7 +45,6 @@ import org.onelab.filter.Key;
|
|||
// TODO should be fixed generic warnings from MapFile methods
|
||||
@SuppressWarnings("unchecked")
|
||||
public class BloomFilterMapFile extends HBaseMapFile {
|
||||
@SuppressWarnings("hiding")
|
||||
static final Log LOG = LogFactory.getLog(BloomFilterMapFile.class);
|
||||
protected static final String BLOOMFILTER_FILE_NAME = "filter";
|
||||
|
||||
|
@ -89,7 +88,10 @@ public class BloomFilterMapFile extends HBaseMapFile {
|
|||
}
|
||||
return filter;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.hbase.io.MapFile.Reader#get(org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable)
|
||||
*/
|
||||
@Override
|
||||
public Writable get(WritableComparable key, Writable val)
|
||||
throws IOException {
|
||||
|
@ -108,6 +110,9 @@ public class BloomFilterMapFile extends HBaseMapFile {
|
|||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.hbase.io.MapFile.Reader#getClosest(org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable)
|
||||
*/
|
||||
@Override
|
||||
public WritableComparable getClosest(WritableComparable key,
|
||||
Writable val) throws IOException {
|
||||
|
@ -199,6 +204,9 @@ public class BloomFilterMapFile extends HBaseMapFile {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.hbase.io.MapFile.Writer#append(org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable)
|
||||
*/
|
||||
@Override
|
||||
public void append(WritableComparable key, Writable val)
|
||||
throws IOException {
|
||||
|
@ -208,6 +216,9 @@ public class BloomFilterMapFile extends HBaseMapFile {
|
|||
super.append(key, val);
|
||||
}
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.hbase.io.MapFile.Writer#close()
|
||||
*/
|
||||
@Override
|
||||
public synchronized void close() throws IOException {
|
||||
super.close();
|
||||
|
|
|
@ -153,6 +153,9 @@ public class Cell implements Writable, Iterable<Map.Entry<Long, byte[]>>,
|
|||
valueMap.put(ts, val);
|
||||
}
|
||||
|
||||
/**
|
||||
* @see java.lang.Object#toString()
|
||||
*/
|
||||
@Override
|
||||
public String toString() {
|
||||
if (valueMap.size() == 1) {
|
||||
|
@ -225,9 +228,7 @@ public class Cell implements Writable, Iterable<Map.Entry<Long, byte[]>>,
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* (non-Javadoc)
|
||||
*
|
||||
/**
|
||||
* @see
|
||||
* org.apache.hadoop.hbase.rest.serializer.ISerializable#restSerialize(org
|
||||
* .apache.hadoop.hbase.rest.serializer.IRestSerializer)
|
||||
|
|
|
@ -98,25 +98,37 @@ public class DataOutputBuffer extends DataOutputStream {
|
|||
|
||||
/** Returns the current contents of the buffer.
|
||||
* Data is only valid to {@link #getLength()}.
|
||||
* @return byte[]
|
||||
*/
|
||||
public byte[] getData() { return buffer.getData(); }
|
||||
|
||||
/** Returns the length of the valid data currently in the buffer. */
|
||||
/** Returns the length of the valid data currently in the buffer.
|
||||
* @return int
|
||||
*/
|
||||
public int getLength() { return buffer.getLength(); }
|
||||
|
||||
/** Resets the buffer to empty. */
|
||||
/** Resets the buffer to empty.
|
||||
* @return DataOutputBuffer
|
||||
*/
|
||||
public DataOutputBuffer reset() {
|
||||
this.written = 0;
|
||||
buffer.reset();
|
||||
return this;
|
||||
}
|
||||
|
||||
/** Writes bytes from a DataInput directly into the buffer. */
|
||||
/** Writes bytes from a DataInput directly into the buffer.
|
||||
* @param in
|
||||
* @param length
|
||||
* @throws IOException
|
||||
*/
|
||||
public void write(DataInput in, int length) throws IOException {
|
||||
buffer.write(in, length);
|
||||
}
|
||||
|
||||
/** Write to a file stream */
|
||||
/** Write to a file stream
|
||||
* @param out
|
||||
* @throws IOException
|
||||
*/
|
||||
public void writeTo(OutputStream out) throws IOException {
|
||||
buffer.writeTo(out);
|
||||
}
|
||||
|
|
|
@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
|
|||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
/**
|
||||
* A facade for a {@link org.apache.hadoop.io.hfile.HFile.Reader} that serves up
|
||||
* A facade for a {@link org.apache.hadoop.hbase.io.hfile.HFile.Reader} that serves up
|
||||
* either the top or bottom half of a HFile where 'bottom' is the first half
|
||||
* of the file containing the keys that sort lowest and 'top' is the second half
|
||||
* of the file with keys that sort greater than those of the bottom half.
|
||||
|
@ -43,18 +43,18 @@ import org.apache.hadoop.hbase.util.Bytes;
|
|||
* <p>This type works in tandem with the {@link Reference} type. This class
|
||||
* is used reading while Reference is used writing.
|
||||
*
|
||||
* <p>This file is not splitable. Calls to {@link #midKey()} return null.
|
||||
* <p>This file is not splitable. Calls to {@link #midkey()} return null.
|
||||
*/
|
||||
public class HalfHFileReader extends HFile.Reader {
|
||||
static final Log LOG = LogFactory.getLog(HalfHFileReader.class);
|
||||
private final boolean top;
|
||||
protected final boolean top;
|
||||
// This is the key we split around. Its the first possible entry on a row:
|
||||
// i.e. empty column and a timestamp of LATEST_TIMESTAMP.
|
||||
private final byte [] splitkey;
|
||||
protected final byte [] splitkey;
|
||||
|
||||
/**
|
||||
* @param fs
|
||||
* @param f
|
||||
* @param p
|
||||
* @param c
|
||||
* @param r
|
||||
* @throws IOException
|
||||
|
@ -77,6 +77,7 @@ public class HalfHFileReader extends HFile.Reader {
|
|||
return this.top;
|
||||
}
|
||||
|
||||
@Override
|
||||
public HFileScanner getScanner() {
|
||||
final HFileScanner s = super.getScanner();
|
||||
return new HFileScanner() {
|
||||
|
@ -179,22 +180,23 @@ public class HalfHFileReader extends HFile.Reader {
|
|||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] getLastKey() {
|
||||
if (top) {
|
||||
return super.getLastKey();
|
||||
} else {
|
||||
HFileScanner scanner = getScanner();
|
||||
try {
|
||||
if (scanner.seekBefore(this.splitkey)) {
|
||||
return Bytes.toBytes(scanner.getKey());
|
||||
}
|
||||
} catch (IOException e) {
|
||||
LOG.warn("Failed seekBefore " + Bytes.toString(this.splitkey), e);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
HFileScanner scanner = getScanner();
|
||||
try {
|
||||
if (scanner.seekBefore(this.splitkey)) {
|
||||
return Bytes.toBytes(scanner.getKey());
|
||||
}
|
||||
} catch (IOException e) {
|
||||
LOG.warn("Failed seekBefore " + Bytes.toString(this.splitkey), e);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] midkey() throws IOException {
|
||||
// Returns null to indicate file is not splitable.
|
||||
return null;
|
||||
|
|
|
@ -168,6 +168,9 @@ implements SortedMap<byte[],V>, Configurable, Writable, CodeToClassAndBack{
|
|||
return b;
|
||||
}
|
||||
|
||||
/**
|
||||
* @see java.lang.Object#toString()
|
||||
*/
|
||||
@Override
|
||||
public String toString() {
|
||||
return this.instance.toString();
|
||||
|
|
|
@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.filter.RowFilterInterface;
|
|||
import org.apache.hadoop.hbase.filter.RowFilterSet;
|
||||
import org.apache.hadoop.hbase.io.HbaseMapWritable;
|
||||
import org.apache.hadoop.io.MapWritable;
|
||||
import org.apache.hadoop.io.ObjectWritable;
|
||||
import org.apache.hadoop.io.Text;
|
||||
import org.apache.hadoop.io.Writable;
|
||||
import org.apache.hadoop.io.WritableFactories;
|
||||
|
@ -173,7 +174,10 @@ public class HbaseObjectWritable implements Writable, Configurable {
|
|||
this.declaredClass = instance.getClass();
|
||||
this.instance = instance;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @see java.lang.Object#toString()
|
||||
*/
|
||||
@Override
|
||||
public String toString() {
|
||||
return "OW[class=" + declaredClass + ",value=" + instance + "]";
|
||||
|
|
|
@ -117,7 +117,7 @@ public class ImmutableBytesWritable implements WritableComparable<ImmutableBytes
|
|||
}
|
||||
|
||||
// Below methods copied from BytesWritable
|
||||
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return WritableComparator.hashBytes(bytes, this.bytes.length);
|
||||
|
@ -146,7 +146,10 @@ public class ImmutableBytesWritable implements WritableComparable<ImmutableBytes
|
|||
WritableComparator.compareBytes(this.bytes, 0, this.bytes.length, that,
|
||||
0, that.length);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @see java.lang.Object#equals(java.lang.Object)
|
||||
*/
|
||||
@Override
|
||||
public boolean equals(Object right_obj) {
|
||||
if (right_obj instanceof byte []) {
|
||||
|
@ -157,7 +160,10 @@ public class ImmutableBytesWritable implements WritableComparable<ImmutableBytes
|
|||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @see java.lang.Object#toString()
|
||||
*/
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer sb = new StringBuffer(3*this.bytes.length);
|
||||
|
@ -186,7 +192,10 @@ public class ImmutableBytesWritable implements WritableComparable<ImmutableBytes
|
|||
public Comparator() {
|
||||
super(ImmutableBytesWritable.class);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @see org.apache.hadoop.io.WritableComparator#compare(byte[], int, int, byte[], int, int)
|
||||
*/
|
||||
@Override
|
||||
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
|
||||
return comparator.compare(b1, s1, l1, b2, s2, l2);
|
||||
|
|
|
@ -55,7 +55,7 @@ import org.apache.hadoop.io.compress.DefaultCodec;
|
|||
* SequenceFile.Sorter}.
|
||||
*/
|
||||
public class MapFile {
|
||||
private static final Log LOG = LogFactory.getLog(MapFile.class);
|
||||
protected static final Log LOG = LogFactory.getLog(MapFile.class);
|
||||
|
||||
/** The name of the index file. */
|
||||
public static final String INDEX_FILE_NAME = "index";
|
||||
|
@ -83,7 +83,14 @@ public class MapFile {
|
|||
private WritableComparable lastKey;
|
||||
|
||||
|
||||
/** Create the named map for keys of the named class. */
|
||||
/** Create the named map for keys of the named class.
|
||||
* @param conf
|
||||
* @param fs
|
||||
* @param dirName
|
||||
* @param keyClass
|
||||
* @param valClass
|
||||
* @throws IOException
|
||||
*/
|
||||
public Writer(Configuration conf, FileSystem fs, String dirName,
|
||||
Class<? extends WritableComparable> keyClass, Class valClass)
|
||||
throws IOException {
|
||||
|
@ -92,7 +99,16 @@ public class MapFile {
|
|||
SequenceFile.getCompressionType(conf));
|
||||
}
|
||||
|
||||
/** Create the named map for keys of the named class. */
|
||||
/** Create the named map for keys of the named class.
|
||||
* @param conf
|
||||
* @param fs
|
||||
* @param dirName
|
||||
* @param keyClass
|
||||
* @param valClass
|
||||
* @param compress
|
||||
* @param progress
|
||||
* @throws IOException
|
||||
*/
|
||||
public Writer(Configuration conf, FileSystem fs, String dirName,
|
||||
Class<? extends WritableComparable> keyClass, Class valClass,
|
||||
CompressionType compress, Progressable progress)
|
||||
|
@ -111,7 +127,15 @@ public class MapFile {
|
|||
compress, codec, progress);
|
||||
}
|
||||
|
||||
/** Create the named map for keys of the named class. */
|
||||
/** Create the named map for keys of the named class.
|
||||
* @param conf
|
||||
* @param fs
|
||||
* @param dirName
|
||||
* @param keyClass
|
||||
* @param valClass
|
||||
* @param compress
|
||||
* @throws IOException
|
||||
*/
|
||||
public Writer(Configuration conf, FileSystem fs, String dirName,
|
||||
Class<? extends WritableComparable> keyClass, Class valClass,
|
||||
CompressionType compress)
|
||||
|
@ -119,21 +143,45 @@ public class MapFile {
|
|||
this(conf, fs, dirName, WritableComparator.get(keyClass), valClass, compress);
|
||||
}
|
||||
|
||||
/** Create the named map using the named key comparator. */
|
||||
/** Create the named map using the named key comparator.
|
||||
* @param conf
|
||||
* @param fs
|
||||
* @param dirName
|
||||
* @param comparator
|
||||
* @param valClass
|
||||
* @throws IOException
|
||||
*/
|
||||
public Writer(Configuration conf, FileSystem fs, String dirName,
|
||||
WritableComparator comparator, Class valClass)
|
||||
throws IOException {
|
||||
this(conf, fs, dirName, comparator, valClass,
|
||||
SequenceFile.getCompressionType(conf));
|
||||
}
|
||||
/** Create the named map using the named key comparator. */
|
||||
/** Create the named map using the named key comparator.
|
||||
* @param conf
|
||||
* @param fs
|
||||
* @param dirName
|
||||
* @param comparator
|
||||
* @param valClass
|
||||
* @param compress
|
||||
* @throws IOException
|
||||
*/
|
||||
public Writer(Configuration conf, FileSystem fs, String dirName,
|
||||
WritableComparator comparator, Class valClass,
|
||||
SequenceFile.CompressionType compress)
|
||||
throws IOException {
|
||||
this(conf, fs, dirName, comparator, valClass, compress, null);
|
||||
}
|
||||
/** Create the named map using the named key comparator. */
|
||||
/** Create the named map using the named key comparator.
|
||||
* @param conf
|
||||
* @param fs
|
||||
* @param dirName
|
||||
* @param comparator
|
||||
* @param valClass
|
||||
* @param compress
|
||||
* @param progress
|
||||
* @throws IOException
|
||||
*/
|
||||
public Writer(Configuration conf, FileSystem fs, String dirName,
|
||||
WritableComparator comparator, Class valClass,
|
||||
SequenceFile.CompressionType compress,
|
||||
|
@ -142,7 +190,17 @@ public class MapFile {
|
|||
this(conf, fs, dirName, comparator, valClass,
|
||||
compress, new DefaultCodec(), progress);
|
||||
}
|
||||
/** Create the named map using the named key comparator. */
|
||||
/** Create the named map using the named key comparator.
|
||||
* @param conf
|
||||
* @param fs
|
||||
* @param dirName
|
||||
* @param comparator
|
||||
* @param valClass
|
||||
* @param compress
|
||||
* @param codec
|
||||
* @param progress
|
||||
* @throws IOException
|
||||
*/
|
||||
public Writer(Configuration conf, FileSystem fs, String dirName,
|
||||
WritableComparator comparator, Class valClass,
|
||||
SequenceFile.CompressionType compress, CompressionCodec codec,
|
||||
|
@ -182,6 +240,8 @@ public class MapFile {
|
|||
public void setIndexInterval(int interval) { indexInterval = interval; }
|
||||
|
||||
/** Sets the index interval and stores it in conf
|
||||
* @param conf
|
||||
* @param interval
|
||||
* @see #getIndexInterval()
|
||||
*/
|
||||
public static void setIndexInterval(Configuration conf, int interval) {
|
||||
|
@ -265,13 +325,24 @@ public class MapFile {
|
|||
*/
|
||||
public Class<?> getValueClass() { return data.getValueClass(); }
|
||||
|
||||
/** Construct a map reader for the named map.*/
|
||||
/** Construct a map reader for the named map.
|
||||
* @param fs
|
||||
* @param dirName
|
||||
* @param conf
|
||||
* @throws IOException
|
||||
*/
|
||||
public Reader(FileSystem fs, String dirName, Configuration conf) throws IOException {
|
||||
this(fs, dirName, null, conf);
|
||||
INDEX_SKIP = conf.getInt("io.map.index.skip", 0);
|
||||
}
|
||||
|
||||
/** Construct a map reader for the named map using the named comparator.*/
|
||||
/** Construct a map reader for the named map using the named comparator.
|
||||
* @param fs
|
||||
* @param dirName
|
||||
* @param comparator
|
||||
* @param conf
|
||||
* @throws IOException
|
||||
*/
|
||||
public Reader(FileSystem fs, String dirName, WritableComparator comparator, Configuration conf)
|
||||
throws IOException {
|
||||
this(fs, dirName, comparator, conf, true);
|
||||
|
@ -344,9 +415,8 @@ public class MapFile {
|
|||
if (skip > 0) {
|
||||
skip--;
|
||||
continue; // skip this entry
|
||||
} else {
|
||||
skip = INDEX_SKIP; // reset skip
|
||||
}
|
||||
skip = INDEX_SKIP; // reset skip
|
||||
|
||||
if (count == keys.length) { // time to grow arrays
|
||||
int newLength = (keys.length*3)/2;
|
||||
|
@ -541,8 +611,8 @@ public class MapFile {
|
|||
if (seek(key)) {
|
||||
data.getCurrentValue(val);
|
||||
return val;
|
||||
} else
|
||||
return null;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/* (non-Javadoc)
|
||||
|
@ -587,7 +657,12 @@ public class MapFile {
|
|||
|
||||
}
|
||||
|
||||
/** Renames an existing map directory. */
|
||||
/** Renames an existing map directory.
|
||||
* @param fs
|
||||
* @param oldName
|
||||
* @param newName
|
||||
* @throws IOException
|
||||
*/
|
||||
public static void rename(FileSystem fs, String oldName, String newName)
|
||||
throws IOException {
|
||||
Path oldDir = new Path(oldName);
|
||||
|
@ -597,7 +672,11 @@ public class MapFile {
|
|||
}
|
||||
}
|
||||
|
||||
/** Deletes the named map file. */
|
||||
/** Deletes the named map file.
|
||||
* @param fs
|
||||
* @param name
|
||||
* @throws IOException
|
||||
*/
|
||||
public static void delete(FileSystem fs, String name) throws IOException {
|
||||
Path dir = new Path(name);
|
||||
Path data = new Path(dir, DATA_FILE_NAME);
|
||||
|
@ -615,6 +694,7 @@ public class MapFile {
|
|||
* @param keyClass key class (has to be a subclass of Writable)
|
||||
* @param valueClass value class (has to be a subclass of Writable)
|
||||
* @param dryrun do not perform any changes, just report what needs to be done
|
||||
* @param conf
|
||||
* @return number of valid entries in this MapFile, or -1 if no fixing was needed
|
||||
* @throws Exception
|
||||
*/
|
||||
|
|
|
@ -48,7 +48,6 @@ public class Reference implements Writable {
|
|||
|
||||
/**
|
||||
* Constructor
|
||||
* @param r
|
||||
* @param s This is a serialized storekey with the row we are to split on,
|
||||
* an empty column and a timestamp of the LATEST_TIMESTAMP. This is the first
|
||||
* possible entry in a row. This is what we are splitting around.
|
||||
|
@ -74,6 +73,10 @@ public class Reference implements Writable {
|
|||
return splitkey;
|
||||
}
|
||||
|
||||
/**
|
||||
* @see java.lang.Object#toString()
|
||||
*/
|
||||
@Override
|
||||
public String toString() {
|
||||
return "" + this.region;
|
||||
}
|
||||
|
|
|
@@ -46,7 +46,7 @@ import agilejson.TOJSON;
 * Holds row name and then a map of columns to cells.
 */
public class RowResult implements Writable, SortedMap<byte [], Cell>,
    Comparable, ISerializable {
    Comparable<RowResult>, ISerializable {
  private byte [] row = null;
  private final HbaseMapWritable<byte [], Cell> cells;

@@ -79,13 +79,12 @@ public class RowResult implements Writable, SortedMap<byte [], Cell>,
  // Map interface
  //

  public Cell put(@SuppressWarnings("unused") byte [] key,
    @SuppressWarnings("unused") Cell value) {
  public Cell put(byte [] key, Cell value) {
    throw new UnsupportedOperationException("RowResult is read-only!");
  }

  @SuppressWarnings("unchecked")
  public void putAll(@SuppressWarnings("unused") Map map) {
  public void putAll(Map map) {
    throw new UnsupportedOperationException("RowResult is read-only!");
  }

@@ -93,7 +92,7 @@ public class RowResult implements Writable, SortedMap<byte [], Cell>,
    return this.cells.get(key);
  }

  public Cell remove(@SuppressWarnings("unused") Object key) {
  public Cell remove(Object key) {
    throw new UnsupportedOperationException("RowResult is read-only!");
  }

@@ -105,7 +104,7 @@ public class RowResult implements Writable, SortedMap<byte [], Cell>,
    return cells.containsKey(Bytes.toBytes(key));
  }

  public boolean containsValue(@SuppressWarnings("unused") Object value) {
  public boolean containsValue(Object value) {
    throw new UnsupportedOperationException("Don't support containsValue!");
  }

@@ -212,7 +211,7 @@ public class RowResult implements Writable, SortedMap<byte [], Cell>,
      this.cell = cell;
    }

    public Cell setValue(@SuppressWarnings("unused") Cell c) {
    public Cell setValue(Cell c) {
      throw new UnsupportedOperationException("RowResult is read-only!");
    }

@@ -289,7 +288,7 @@ public class RowResult implements Writable, SortedMap<byte [], Cell>,
   * @param o the RowResult Object to compare to
   * @return the compare number
   */
  public int compareTo(Object o){
    return Bytes.compareTo(this.row, ((RowResult)o).getRow());
  public int compareTo(RowResult o){
    return Bytes.compareTo(this.row, o.getRow());
  }
}

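The RowResult hunk above does two things: it tightens the raw Comparable to Comparable<RowResult>, and it drops the now-unneeded @SuppressWarnings("unused") annotations on the read-only mutators. A minimal sketch of the same two ideas, using hypothetical names rather than the real HBase classes:

    // Hypothetical stand-in for a comparable, read-only row keyed by a byte[].
    final class ReadOnlyRow implements Comparable<ReadOnlyRow> {
      private final byte[] row;

      ReadOnlyRow(byte[] row) {
        this.row = row.clone();              // defensive copy
      }

      // Typed compareTo: no cast needed, and comparing against the wrong type
      // is now a compile-time error instead of a ClassCastException.
      public int compareTo(ReadOnlyRow other) {
        int n = Math.min(this.row.length, other.row.length);
        for (int i = 0; i < n; i++) {
          int d = (this.row[i] & 0xff) - (other.row[i] & 0xff);
          if (d != 0) {
            return d;
          }
        }
        return this.row.length - other.row.length;
      }

      // Read-only view: mutators refuse to work, so no parameter goes unused
      // and no @SuppressWarnings("unused") is needed.
      public void put(byte[] key, byte[] value) {
        throw new UnsupportedOperationException("read-only!");
      }
    }
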
@@ -205,7 +205,7 @@ public class SequenceFile {
  private static final byte BLOCK_COMPRESS_VERSION = (byte)4;
  private static final byte CUSTOM_COMPRESS_VERSION = (byte)5;
  private static final byte VERSION_WITH_METADATA = (byte)6;
  private static byte[] VERSION = new byte[] {
  protected static byte[] VERSION = new byte[] {
    (byte)'S', (byte)'E', (byte)'Q', VERSION_WITH_METADATA
  };

@ -594,12 +594,15 @@ public class SequenceFile {
|
|||
/** Write compressed bytes to outStream.
|
||||
* Note: that it will NOT compress the bytes if they are not compressed.
|
||||
* @param outStream : Stream to write compressed bytes into.
|
||||
* @throws IllegalArgumentException
|
||||
* @throws IOException
|
||||
*/
|
||||
public void writeCompressedBytes(DataOutputStream outStream)
|
||||
throws IllegalArgumentException, IOException;
|
||||
|
||||
/**
|
||||
* Size of stored data.
|
||||
* @return int
|
||||
*/
|
||||
public int getSize();
|
||||
}
|
||||
|
@ -770,11 +773,13 @@ public class SequenceFile {
|
|||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
assert false : "hashCode not designed";
|
||||
return 42; // any arbitrary constant will do
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer sb = new StringBuffer();
|
||||
sb.append("size: ").append(this.theMetadata.size()).append("\n");
|
||||
|
@ -830,14 +835,30 @@ public class SequenceFile {
|
|||
Writer()
|
||||
{}
|
||||
|
||||
/** Create the named file. */
|
||||
/** Create the named file.
|
||||
* @param fs
|
||||
* @param conf
|
||||
* @param name
|
||||
* @param keyClass
|
||||
* @param valClass
|
||||
* @throws IOException
|
||||
*/
|
||||
public Writer(FileSystem fs, Configuration conf, Path name,
|
||||
Class keyClass, Class valClass)
|
||||
throws IOException {
|
||||
this(fs, conf, name, keyClass, valClass, null, new Metadata());
|
||||
}
|
||||
|
||||
/** Create the named file with write-progress reporter. */
|
||||
/** Create the named file with write-progress reporter.
|
||||
* @param fs
|
||||
* @param conf
|
||||
* @param name
|
||||
* @param keyClass
|
||||
* @param valClass
|
||||
* @param progress
|
||||
* @param metadata
|
||||
* @throws IOException
|
||||
*/
|
||||
public Writer(FileSystem fs, Configuration conf, Path name,
|
||||
Class keyClass, Class valClass,
|
||||
Progressable progress, Metadata metadata)
|
||||
|
@ -848,13 +869,25 @@ public class SequenceFile {
|
|||
progress, metadata);
|
||||
}
|
||||
|
||||
/** Create the named file with write-progress reporter. */
|
||||
/** Create the named file with write-progress reporter.
|
||||
* @param fs
|
||||
* @param conf
|
||||
* @param name
|
||||
* @param keyClass
|
||||
* @param valClass
|
||||
* @param bufferSize
|
||||
* @param replication
|
||||
* @param blockSize
|
||||
* @param progress
|
||||
* @param metadata
|
||||
* @throws IOException
|
||||
*/
|
||||
public Writer(FileSystem fs, Configuration conf, Path name,
|
||||
Class keyClass, Class valClass,
|
||||
int bufferSize, short replication, long blockSize,
|
||||
Progressable progress, Metadata metadata)
|
||||
throws IOException {
|
||||
init(name, conf,
|
||||
init(conf,
|
||||
fs.create(name, true, bufferSize, replication, blockSize, progress),
|
||||
keyClass, valClass, false, null, metadata);
|
||||
initializeFileHeader();
|
||||
|
@@ -863,11 +896,11 @@
    }

    /** Write to an arbitrary stream using a specified buffer size. */
    private Writer(Configuration conf, FSDataOutputStream out,
      Class keyClass, Class valClass, Metadata metadata)
    protected Writer(Configuration conf, FSDataOutputStream out,
      Class keyClass, Class valClass, Metadata metadata)
      throws IOException {
      this.ownOutputStream = false;
      init(null, conf, out, keyClass, valClass, false, null, metadata);
      init(conf, out, keyClass, valClass, false, null, metadata);

      initializeFileHeader();
      writeFileHeader();
@ -907,7 +940,7 @@ public class SequenceFile {
|
|||
|
||||
/** Initialize. */
|
||||
@SuppressWarnings("unchecked")
|
||||
void init(Path name, Configuration conf, FSDataOutputStream out,
|
||||
void init(Configuration conf, FSDataOutputStream out,
|
||||
Class keyClass, Class valClass,
|
||||
boolean compress, CompressionCodec codec, Metadata metadata)
|
||||
throws IOException {
|
||||
|
@ -934,16 +967,24 @@ public class SequenceFile {
|
|||
}
|
||||
}
|
||||
|
||||
/** Returns the class of keys in this file. */
|
||||
/** Returns the class of keys in this file.
|
||||
* @return Class
|
||||
*/
|
||||
public Class getKeyClass() { return keyClass; }
|
||||
|
||||
/** Returns the class of values in this file. */
|
||||
/** Returns the class of values in this file.
|
||||
* @return Class
|
||||
*/
|
||||
public Class getValueClass() { return valClass; }
|
||||
|
||||
/** Returns the compression codec of data in this file. */
|
||||
/** Returns the compression codec of data in this file.
|
||||
* @return CompressionCodec
|
||||
*/
|
||||
public CompressionCodec getCompressionCodec() { return codec; }
|
||||
|
||||
/** create a sync point */
|
||||
/** create a sync point
|
||||
* @throws IOException
|
||||
*/
|
||||
public void sync() throws IOException {
|
||||
if (sync != null && lastSyncPos != out.getPos()) {
|
||||
out.writeInt(SYNC_ESCAPE); // mark the start of the sync
|
||||
|
@ -955,7 +996,9 @@ public class SequenceFile {
|
|||
/** Returns the configuration of this file. */
|
||||
Configuration getConf() { return conf; }
|
||||
|
||||
/** Close the file. */
|
||||
/** Close the file.
|
||||
* @throws IOException
|
||||
*/
|
||||
public synchronized void close() throws IOException {
|
||||
keySerializer.close();
|
||||
uncompressedValSerializer.close();
|
||||
|
@ -985,13 +1028,21 @@ public class SequenceFile {
|
|||
}
|
||||
}
|
||||
|
||||
/** Append a key/value pair. */
|
||||
/** Append a key/value pair.
|
||||
* @param key
|
||||
* @param val
|
||||
* @throws IOException
|
||||
*/
|
||||
public synchronized void append(Writable key, Writable val)
|
||||
throws IOException {
|
||||
append((Object) key, (Object) val);
|
||||
}
|
||||
|
||||
/** Append a key/value pair. */
|
||||
/** Append a key/value pair.
|
||||
* @param key
|
||||
* @param val
|
||||
* @throws IOException
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public synchronized void append(Object key, Object val)
|
||||
throws IOException {
|
||||
|
@ -1060,14 +1111,32 @@ public class SequenceFile {
|
|||
/** Write key/compressed-value pairs to a sequence-format file. */
|
||||
static class RecordCompressWriter extends Writer {
|
||||
|
||||
/** Create the named file. */
|
||||
/** Create the named file.
|
||||
* @param fs
|
||||
* @param conf
|
||||
* @param name
|
||||
* @param keyClass
|
||||
* @param valClass
|
||||
* @param codec
|
||||
* @throws IOException
|
||||
*/
|
||||
public RecordCompressWriter(FileSystem fs, Configuration conf, Path name,
|
||||
Class keyClass, Class valClass, CompressionCodec codec)
|
||||
throws IOException {
|
||||
this(conf, fs.create(name), keyClass, valClass, codec, new Metadata());
|
||||
}
|
||||
|
||||
/** Create the named file with write-progress reporter. */
|
||||
/** Create the named file with write-progress reporter.
|
||||
* @param fs
|
||||
* @param conf
|
||||
* @param name
|
||||
* @param keyClass
|
||||
* @param valClass
|
||||
* @param codec
|
||||
* @param progress
|
||||
* @param metadata
|
||||
* @throws IOException
|
||||
*/
|
||||
public RecordCompressWriter(FileSystem fs, Configuration conf, Path name,
|
||||
Class keyClass, Class valClass, CompressionCodec codec,
|
||||
Progressable progress, Metadata metadata)
|
||||
|
@ -1078,14 +1147,27 @@ public class SequenceFile {
|
|||
progress, metadata);
|
||||
}
|
||||
|
||||
/** Create the named file with write-progress reporter. */
|
||||
/** Create the named file with write-progress reporter.
|
||||
* @param fs
|
||||
* @param conf
|
||||
* @param name
|
||||
* @param keyClass
|
||||
* @param valClass
|
||||
* @param bufferSize
|
||||
* @param replication
|
||||
* @param blockSize
|
||||
* @param codec
|
||||
* @param progress
|
||||
* @param metadata
|
||||
* @throws IOException
|
||||
*/
|
||||
public RecordCompressWriter(FileSystem fs, Configuration conf, Path name,
|
||||
Class keyClass, Class valClass,
|
||||
int bufferSize, short replication, long blockSize,
|
||||
CompressionCodec codec,
|
||||
Progressable progress, Metadata metadata)
|
||||
throws IOException {
|
||||
super.init(name, conf,
|
||||
super.init(conf,
|
||||
fs.create(name, true, bufferSize, replication, blockSize, progress),
|
||||
keyClass, valClass, true, codec, metadata);
|
||||
|
||||
|
@ -1094,7 +1176,16 @@ public class SequenceFile {
|
|||
finalizeFileHeader();
|
||||
}
|
||||
|
||||
/** Create the named file with write-progress reporter. */
|
||||
/** Create the named file with write-progress reporter.
|
||||
* @param fs
|
||||
* @param conf
|
||||
* @param name
|
||||
* @param keyClass
|
||||
* @param valClass
|
||||
* @param codec
|
||||
* @param progress
|
||||
* @throws IOException
|
||||
*/
|
||||
public RecordCompressWriter(FileSystem fs, Configuration conf, Path name,
|
||||
Class keyClass, Class valClass, CompressionCodec codec,
|
||||
Progressable progress)
|
||||
|
@@ -1103,11 +1194,11 @@
    }

    /** Write to an arbitrary stream using a specified buffer size. */
    private RecordCompressWriter(Configuration conf, FSDataOutputStream out,
      Class keyClass, Class valClass, CompressionCodec codec, Metadata metadata)
    protected RecordCompressWriter(Configuration conf, FSDataOutputStream out,
      Class keyClass, Class valClass, CompressionCodec codec, Metadata metadata)
      throws IOException {
      this.ownOutputStream = false;
      super.init(null, conf, out, keyClass, valClass, true, codec, metadata);
      super.init(conf, out, keyClass, valClass, true, codec, metadata);

      initializeFileHeader();
      writeFileHeader();
@ -1115,10 +1206,13 @@ public class SequenceFile {
|
|||
|
||||
}
|
||||
|
||||
@Override
|
||||
boolean isCompressed() { return true; }
|
||||
@Override
|
||||
boolean isBlockCompressed() { return false; }
|
||||
|
||||
/** Append a key/value pair. */
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public synchronized void append(Object key, Object val)
|
||||
throws IOException {
|
||||
|
@ -1151,6 +1245,7 @@ public class SequenceFile {
|
|||
}
|
||||
|
||||
/** Append a key/value pair. */
|
||||
@Override
|
||||
public synchronized void appendRaw(byte[] keyData, int keyOffset,
|
||||
int keyLength, ValueBytes val) throws IOException {
|
||||
|
||||
|
@ -1181,7 +1276,15 @@ public class SequenceFile {
|
|||
|
||||
private int compressionBlockSize;
|
||||
|
||||
/** Create the named file. */
|
||||
/** Create the named file.
|
||||
* @param fs
|
||||
* @param conf
|
||||
* @param name
|
||||
* @param keyClass
|
||||
* @param valClass
|
||||
* @param codec
|
||||
* @throws IOException
|
||||
*/
|
||||
public BlockCompressWriter(FileSystem fs, Configuration conf, Path name,
|
||||
Class keyClass, Class valClass, CompressionCodec codec)
|
||||
throws IOException {
|
||||
|
@ -1191,7 +1294,17 @@ public class SequenceFile {
|
|||
null, new Metadata());
|
||||
}
|
||||
|
||||
/** Create the named file with write-progress reporter. */
|
||||
/** Create the named file with write-progress reporter.
|
||||
* @param fs
|
||||
* @param conf
|
||||
* @param name
|
||||
* @param keyClass
|
||||
* @param valClass
|
||||
* @param codec
|
||||
* @param progress
|
||||
* @param metadata
|
||||
* @throws IOException
|
||||
*/
|
||||
public BlockCompressWriter(FileSystem fs, Configuration conf, Path name,
|
||||
Class keyClass, Class valClass, CompressionCodec codec,
|
||||
Progressable progress, Metadata metadata)
|
||||
|
@ -1202,14 +1315,27 @@ public class SequenceFile {
|
|||
progress, metadata);
|
||||
}
|
||||
|
||||
/** Create the named file with write-progress reporter. */
|
||||
/** Create the named file with write-progress reporter.
|
||||
* @param fs
|
||||
* @param conf
|
||||
* @param name
|
||||
* @param keyClass
|
||||
* @param valClass
|
||||
* @param bufferSize
|
||||
* @param replication
|
||||
* @param blockSize
|
||||
* @param codec
|
||||
* @param progress
|
||||
* @param metadata
|
||||
* @throws IOException
|
||||
*/
|
||||
public BlockCompressWriter(FileSystem fs, Configuration conf, Path name,
|
||||
Class keyClass, Class valClass,
|
||||
int bufferSize, short replication, long blockSize,
|
||||
CompressionCodec codec,
|
||||
Progressable progress, Metadata metadata)
|
||||
throws IOException {
|
||||
super.init(name, conf,
|
||||
super.init(conf,
|
||||
fs.create(name, true, bufferSize, replication, blockSize, progress),
|
||||
keyClass, valClass, true, codec, metadata);
|
||||
init(conf.getInt("io.seqfile.compress.blocksize", 1000000));
|
||||
|
@ -1219,7 +1345,16 @@ public class SequenceFile {
|
|||
finalizeFileHeader();
|
||||
}
|
||||
|
||||
/** Create the named file with write-progress reporter. */
|
||||
/** Create the named file with write-progress reporter.
|
||||
* @param fs
|
||||
* @param conf
|
||||
* @param name
|
||||
* @param keyClass
|
||||
* @param valClass
|
||||
* @param codec
|
||||
* @param progress
|
||||
* @throws IOException
|
||||
*/
|
||||
public BlockCompressWriter(FileSystem fs, Configuration conf, Path name,
|
||||
Class keyClass, Class valClass, CompressionCodec codec,
|
||||
Progressable progress)
|
||||
|
@ -1228,11 +1363,11 @@ public class SequenceFile {
|
|||
}
|
||||
|
||||
/** Write to an arbitrary stream using a specified buffer size. */
|
||||
private BlockCompressWriter(Configuration conf, FSDataOutputStream out,
|
||||
Class keyClass, Class valClass, CompressionCodec codec, Metadata metadata)
|
||||
protected BlockCompressWriter(Configuration conf, FSDataOutputStream out,
|
||||
Class keyClass, Class valClass, CompressionCodec codec, Metadata metadata)
|
||||
throws IOException {
|
||||
this.ownOutputStream = false;
|
||||
super.init(null, conf, out, keyClass, valClass, true, codec, metadata);
|
||||
super.init(conf, out, keyClass, valClass, true, codec, metadata);
|
||||
init(1000000);
|
||||
|
||||
initializeFileHeader();
|
||||
|
@ -1240,7 +1375,9 @@ public class SequenceFile {
|
|||
finalizeFileHeader();
|
||||
}
|
||||
|
||||
@Override
|
||||
boolean isCompressed() { return true; }
|
||||
@Override
|
||||
boolean isBlockCompressed() { return true; }
|
||||
|
||||
/** Initialize */
|
||||
|
@ -1268,6 +1405,7 @@ public class SequenceFile {
|
|||
}
|
||||
|
||||
/** Compress and flush contents to dfs */
|
||||
@Override
|
||||
public synchronized void sync() throws IOException {
|
||||
if (noBufferedRecords > 0) {
|
||||
super.sync();
|
||||
|
@ -1305,6 +1443,7 @@ public class SequenceFile {
|
|||
}
|
||||
|
||||
/** Append a key/value pair. */
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public synchronized void append(Object key, Object val)
|
||||
throws IOException {
|
||||
|
@ -1337,6 +1476,7 @@ public class SequenceFile {
|
|||
}
|
||||
|
||||
/** Append a key/value pair. */
|
||||
@Override
|
||||
public synchronized void appendRaw(byte[] keyData, int keyOffset,
|
||||
int keyLength, ValueBytes val) throws IOException {
|
||||
|
||||
|
@ -1420,7 +1560,12 @@ public class SequenceFile {
|
|||
private Deserializer keyDeserializer;
|
||||
private Deserializer valDeserializer;
|
||||
|
||||
/** Open the named file. */
|
||||
/** Open the named file.
|
||||
* @param fs
|
||||
* @param file
|
||||
* @param conf
|
||||
* @throws IOException
|
||||
*/
|
||||
public Reader(FileSystem fs, Path file, Configuration conf)
|
||||
throws IOException {
|
||||
this(fs, file, conf.getInt("io.file.buffer.size", 4096), conf, false);
|
||||
|
@ -1577,7 +1722,9 @@ public class SequenceFile {
|
|||
return sf.getDeserializer(c);
|
||||
}
|
||||
|
||||
/** Close the file. */
|
||||
/** Close the file.
|
||||
* @throws IOException
|
||||
*/
|
||||
public synchronized void close() throws IOException {
|
||||
// Return the decompressors to the pool
|
||||
CodecPool.returnDecompressor(keyLenDecompressor);
|
||||
|
@ -1598,12 +1745,16 @@ public class SequenceFile {
|
|||
in.close();
|
||||
}
|
||||
|
||||
/** Returns the name of the key class. */
|
||||
/** Returns the name of the key class.
|
||||
* @return String
|
||||
*/
|
||||
public String getKeyClassName() {
|
||||
return keyClassName;
|
||||
}
|
||||
|
||||
/** Returns the class of keys in this file. */
|
||||
/** Returns the class of keys in this file.
|
||||
* @return Class
|
||||
*/
|
||||
public synchronized Class<?> getKeyClass() {
|
||||
if (null == keyClass) {
|
||||
try {
|
||||
|
@ -1615,12 +1766,16 @@ public class SequenceFile {
|
|||
return keyClass;
|
||||
}
|
||||
|
||||
/** Returns the name of the value class. */
|
||||
/** Returns the name of the value class.
|
||||
* @return String
|
||||
*/
|
||||
public String getValueClassName() {
|
||||
return valClassName;
|
||||
}
|
||||
|
||||
/** Returns the class of values in this file. */
|
||||
/** Returns the class of values in this file.
|
||||
* @return Class
|
||||
*/
|
||||
public synchronized Class<?> getValueClass() {
|
||||
if (null == valClass) {
|
||||
try {
|
||||
|
@ -1632,7 +1787,9 @@ public class SequenceFile {
|
|||
return valClass;
|
||||
}
|
||||
|
||||
/** Returns true if values are compressed. */
|
||||
/** Returns true if values are compressed.
|
||||
* @return
|
||||
*/
|
||||
public boolean isCompressed() { return decompress; }
|
||||
|
||||
/** Returns true if records are block-compressed. */
|
||||
|
|
|
@@ -28,14 +28,14 @@ import java.nio.ByteBuffer;
public interface BlockCache {
  /**
   * Add block to cache.
   * @param blockNumber Zero-based file block number.
   * @param blockName Zero-based file block number.
   * @param buf The block contents wrapped in a ByteBuffer.
   */
  public void cacheBlock(String blockName, ByteBuffer buf);

  /**
   * Fetch block from cache.
   * @param blockNumber Block number to fetch.
   * @param blockName Block number to fetch.
   * @return Block or null if block is not in the cache.
   */
  public ByteBuffer getBlock(String blockName);

@@ -143,6 +143,7 @@ public class HFile {
   */
  public final static Compression.Algorithm DEFAULT_COMPRESSION_ALGORITHM =
    Compression.Algorithm.NONE;
  /** Default compression name: none. */
  public final static String DEFAULT_COMPRESSION =
    DEFAULT_COMPRESSION_ALGORITHM.getName();

@@ -228,7 +229,6 @@
   * @param blocksize
   * @param compress
   * @param comparator
   * @param bloomfilter
   * @throws IOException
   * @throws IOException
   */

@@ -268,7 +268,6 @@
   * @param blocksize
   * @param compress
   * @param c
   * @param bloomfilter
   * @throws IOException
   */
  public Writer(final FSDataOutputStream ostream, final int blocksize,

@@ -428,6 +427,7 @@
    return this.path;
  }

  @Override
  public String toString() {
    return "writer=" + this.name + ", compression=" +
      this.compressAlgo.getName();

@@ -664,6 +664,7 @@
    this.name = this.istream.toString();
  }

  @Override
  public String toString() {
    return "reader=" + this.name +
      (!isFileInfoLoaded()? "":

@@ -1244,6 +1245,7 @@
    }
  }

  @Override
  public String toString() {
    return "fileinfoOffset=" + fileinfoOffset +
      ", dataIndexOffset=" + dataIndexOffset +

@@ -1413,6 +1415,7 @@
    return bi;
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("size=" + count);

@@ -57,6 +57,7 @@ public interface HFileScanner {
   * that: k[i] < key. Furthermore: there may be a k[i+1], such that
   * k[i] < key <= k[i+1] but there may also NOT be a k[i+1], and next() will
   * return false (EOF).
   * @throws IOException
   */
  public boolean seekBefore(byte [] key) throws IOException;
  /**

@@ -66,20 +66,20 @@ public class HBaseClient {

  public static final Log LOG =
    LogFactory.getLog("org.apache.hadoop.ipc.HBaseClass");
  private Hashtable<ConnectionId, Connection> connections =
  protected Hashtable<ConnectionId, Connection> connections =
    new Hashtable<ConnectionId, Connection>();

  private Class<? extends Writable> valueClass; // class of call values
  private int counter; // counter for call ids
  private AtomicBoolean running = new AtomicBoolean(true); // if client runs
  final private Configuration conf;
  final private int maxIdleTime; //connections will be culled if it was idle for
  protected Class<? extends Writable> valueClass; // class of call values
  protected int counter; // counter for call ids
  protected AtomicBoolean running = new AtomicBoolean(true); // if client runs
  final protected Configuration conf;
  final protected int maxIdleTime; //connections will be culled if it was idle for
                                   //maxIdleTime msecs
  final private int maxRetries; //the max. no. of retries for socket connections
  private boolean tcpNoDelay; // if T then disable Nagle's Algorithm
  private int pingInterval; // how often sends ping to the server in msecs
  final protected int maxRetries; //the max. no. of retries for socket connections
  protected boolean tcpNoDelay; // if T then disable Nagle's Algorithm
  protected int pingInterval; // how often sends ping to the server in msecs

  private SocketFactory socketFactory; // how to create sockets
  protected SocketFactory socketFactory; // how to create sockets
  private int refCount = 1;

  final private static String PING_INTERVAL_NAME = "ipc.ping.interval";
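These field visibility changes are the performance item named in the commit title: whenever an inner class such as Connection reads or writes a private member of its enclosing class, javac has to emit a hidden synthetic accessor method and route every access through it. Widening the member to protected or package-private lets the inner class reach it directly. A small sketch of the pattern, with hypothetical names:

    public class Outer {
      // Accessed from Inner while private: the compiler generates synthetic
      // access$NNN() bridge methods, and every increment becomes a method call.
      private int privateCounter;

      // Package-private: Inner reaches the field directly, no bridge method.
      int sharedCounter;

      class Inner {
        void bump() {
          privateCounter++;   // goes through a synthetic accessor
          sharedCounter++;    // plain field access
        }
      }
    }
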
@ -187,7 +187,7 @@ public class HBaseClient {
|
|||
// currently active calls
|
||||
private Hashtable<Integer, Call> calls = new Hashtable<Integer, Call>();
|
||||
private AtomicLong lastActivity = new AtomicLong();// last I/O activity time
|
||||
private AtomicBoolean shouldCloseConnection = new AtomicBoolean(); // indicate if the connection is closed
|
||||
protected AtomicBoolean shouldCloseConnection = new AtomicBoolean(); // indicate if the connection is closed
|
||||
private IOException closeException; // close reason
|
||||
|
||||
public Connection(InetSocketAddress address) throws IOException {
|
||||
|
@ -219,7 +219,7 @@ public class HBaseClient {
|
|||
* @param call to add
|
||||
* @return true if the call was added.
|
||||
*/
|
||||
private synchronized boolean addCall(Call call) {
|
||||
protected synchronized boolean addCall(Call call) {
|
||||
if (shouldCloseConnection.get())
|
||||
return false;
|
||||
calls.put(call.id, call);
|
||||
|
@ -244,9 +244,8 @@ public class HBaseClient {
|
|||
private void handleTimeout(SocketTimeoutException e) throws IOException {
|
||||
if (shouldCloseConnection.get() || !running.get()) {
|
||||
throw e;
|
||||
} else {
|
||||
sendPing();
|
||||
}
|
||||
sendPing();
|
||||
}
|
||||
|
||||
/** Read a byte from the stream.
|
||||
|
@ -254,6 +253,7 @@ public class HBaseClient {
|
|||
* until a byte is read.
|
||||
* @throws IOException for any IO problem other than socket timeout
|
||||
*/
|
||||
@Override
|
||||
public int read() throws IOException {
|
||||
do {
|
||||
try {
|
||||
|
@ -270,6 +270,7 @@ public class HBaseClient {
|
|||
*
|
||||
* @return the total number of bytes read; -1 if the connection is closed.
|
||||
*/
|
||||
@Override
|
||||
public int read(byte[] buf, int off, int len) throws IOException {
|
||||
do {
|
||||
try {
|
||||
|
@ -285,7 +286,7 @@ public class HBaseClient {
|
|||
* a header to the server and starts
|
||||
* the connection thread that waits for responses.
|
||||
*/
|
||||
private synchronized void setupIOstreams() {
|
||||
protected synchronized void setupIOstreams() {
|
||||
if (socket != null || shouldCloseConnection.get()) {
|
||||
return;
|
||||
}
|
||||
|
@ -423,7 +424,7 @@ public class HBaseClient {
|
|||
/* Send a ping to the server if the time elapsed
|
||||
* since last I/O activity is equal to or greater than the ping interval
|
||||
*/
|
||||
private synchronized void sendPing() throws IOException {
|
||||
protected synchronized void sendPing() throws IOException {
|
||||
long curTime = System.currentTimeMillis();
|
||||
if ( curTime - lastActivity.get() >= pingInterval) {
|
||||
lastActivity.set(curTime);
|
||||
|
@ -434,6 +435,7 @@ public class HBaseClient {
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
if (LOG.isDebugEnabled())
|
||||
LOG.debug(getName() + ": starting, having connections "
|
||||
|
@ -453,6 +455,7 @@ public class HBaseClient {
|
|||
/** Initiates a call by sending the parameter to the remote server.
|
||||
* Note: this is not called from the Connection thread, but by other
|
||||
* threads.
|
||||
* @param call
|
||||
*/
|
||||
public void sendParam(Call call) {
|
||||
if (shouldCloseConnection.get()) {
|
||||
|
@ -580,7 +583,7 @@ public class HBaseClient {
|
|||
/** Call implementation used for parallel calls. */
|
||||
private class ParallelCall extends Call {
|
||||
private ParallelResults results;
|
||||
private int index;
|
||||
protected int index;
|
||||
|
||||
public ParallelCall(Writable param, ParallelResults results, int index) {
|
||||
super(param);
|
||||
|
@ -589,6 +592,7 @@ public class HBaseClient {
|
|||
}
|
||||
|
||||
/** Deliver result to result collector. */
|
||||
@Override
|
||||
protected void callComplete() {
|
||||
results.callComplete(this);
|
||||
}
|
||||
|
@ -596,16 +600,19 @@ public class HBaseClient {
|
|||
|
||||
/** Result collector for parallel calls. */
|
||||
private static class ParallelResults {
|
||||
private Writable[] values;
|
||||
private int size;
|
||||
private int count;
|
||||
protected Writable[] values;
|
||||
protected int size;
|
||||
protected int count;
|
||||
|
||||
public ParallelResults(int size) {
|
||||
this.values = new Writable[size];
|
||||
this.size = size;
|
||||
}
|
||||
|
||||
/** Collect a result. */
|
||||
/**
|
||||
* Collect a result.
|
||||
* @param call
|
||||
*/
|
||||
public synchronized void callComplete(ParallelCall call) {
|
||||
values[call.index] = call.value; // store the value
|
||||
count++; // count it
|
||||
|
@ -614,8 +621,13 @@ public class HBaseClient {
|
|||
}
|
||||
}
|
||||
|
||||
/** Construct an IPC client whose values are of the given {@link Writable}
|
||||
* class. */
|
||||
/**
|
||||
* Construct an IPC client whose values are of the given {@link Writable}
|
||||
* class.
|
||||
* @param valueClass
|
||||
* @param conf
|
||||
* @param factory
|
||||
*/
|
||||
public HBaseClient(Class<? extends Writable> valueClass, Configuration conf,
|
||||
SocketFactory factory) {
|
||||
this.valueClass = valueClass;
|
||||
|
@ -677,15 +689,20 @@ public class HBaseClient {
|
|||
|
||||
/** Make a call, passing <code>param</code>, to the IPC server running at
|
||||
* <code>address</code>, returning the value. Throws exceptions if there are
|
||||
* network problems or if the remote code threw an exception. */
|
||||
* network problems or if the remote code threw an exception.
|
||||
* @param param
|
||||
* @param address
|
||||
* @return Writable
|
||||
* @throws IOException
|
||||
*/
|
||||
public Writable call(Writable param, InetSocketAddress address)
|
||||
throws InterruptedException, IOException {
|
||||
throws IOException {
|
||||
return call(param, address, null);
|
||||
}
|
||||
|
||||
public Writable call(Writable param, InetSocketAddress addr,
|
||||
UserGroupInformation ticket)
|
||||
throws InterruptedException, IOException {
|
||||
throws IOException {
|
||||
Call call = new Call(param);
|
||||
Connection connection = getConnection(addr, ticket, call);
|
||||
connection.sendParam(call); // send the parameter
|
||||
|
@ -700,12 +717,11 @@ public class HBaseClient {
|
|||
if (call.error instanceof RemoteException) {
|
||||
call.error.fillInStackTrace();
|
||||
throw call.error;
|
||||
} else { // local exception
|
||||
throw wrapException(addr, call.error);
|
||||
}
|
||||
} else {
|
||||
return call.value;
|
||||
// local exception
|
||||
throw wrapException(addr, call.error);
|
||||
}
|
||||
return call.value;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -743,7 +759,12 @@ public class HBaseClient {
|
|||
/** Makes a set of calls in parallel. Each parameter is sent to the
|
||||
* corresponding address. When all values are available, or have timed out
|
||||
* or errored, the collected results are returned in an array. The array
|
||||
* contains nulls for calls that timed out or errored. */
|
||||
* contains nulls for calls that timed out or errored.
|
||||
* @param params
|
||||
* @param addresses
|
||||
* @return Writable[]
|
||||
* @throws IOException
|
||||
*/
|
||||
public Writable[] call(Writable[] params, InetSocketAddress[] addresses)
|
||||
throws IOException {
|
||||
if (addresses.length == 0) return new Writable[0];
|
||||
|
|
|
@ -78,7 +78,7 @@ public class HBaseRPC {
|
|||
// Leave this out in the hadoop ipc package but keep class name. Do this
|
||||
// so that we dont' get the logging of this class's invocations by doing our
|
||||
// blanket enabling DEBUG on the o.a.h.h. package.
|
||||
private static final Log LOG =
|
||||
protected static final Log LOG =
|
||||
LogFactory.getLog("org.apache.hadoop.ipc.HbaseRPC");
|
||||
|
||||
private HBaseRPC() {
|
||||
|
@ -236,6 +236,8 @@ public class HBaseRPC {
|
|||
private Map<SocketFactory, HBaseClient> clients =
|
||||
new HashMap<SocketFactory, HBaseClient>();
|
||||
|
||||
protected ClientCache() {}
|
||||
|
||||
/**
|
||||
* Construct & cache an IPC client with the user-provided SocketFactory
|
||||
* if no cached client exists.
|
||||
|
@ -243,7 +245,7 @@ public class HBaseRPC {
|
|||
* @param conf Configuration
|
||||
* @return an IPC client
|
||||
*/
|
||||
private synchronized HBaseClient getClient(Configuration conf,
|
||||
protected synchronized HBaseClient getClient(Configuration conf,
|
||||
SocketFactory factory) {
|
||||
// Construct & cache client. The configuration is only used for timeout,
|
||||
// and Clients have connection pools. So we can either (a) lose some
|
||||
|
@ -256,7 +258,7 @@ public class HBaseRPC {
|
|||
client = new HBaseClient(HbaseObjectWritable.class, conf, factory);
|
||||
clients.put(factory, client);
|
||||
} else {
|
||||
((HBaseClient)client).incCount();
|
||||
client.incCount();
|
||||
}
|
||||
return client;
|
||||
}
|
||||
|
@ -268,7 +270,7 @@ public class HBaseRPC {
|
|||
* @param conf Configuration
|
||||
* @return an IPC client
|
||||
*/
|
||||
private synchronized HBaseClient getClient(Configuration conf) {
|
||||
protected synchronized HBaseClient getClient(Configuration conf) {
|
||||
return getClient(conf, SocketFactory.getDefault());
|
||||
}
|
||||
|
||||
|
@@ -276,20 +278,20 @@
     * Stop a RPC client connection
     * A RPC client is closed only when its reference count becomes zero.
     */
    private void stopClient(HBaseClient client) {
    protected void stopClient(HBaseClient client) {
      synchronized (this) {
        ((HBaseClient)client).decCount();
        if (((HBaseClient)client).isZeroReference()) {
          clients.remove(((HBaseClient)client).getSocketFactory());
        client.decCount();
        if (client.isZeroReference()) {
          clients.remove(client.getSocketFactory());
        }
      }
      if (((HBaseClient)client).isZeroReference()) {
      if (client.isZeroReference()) {
        client.stop();
      }
    }
  }

  private static ClientCache CLIENTS = new ClientCache();
  protected final static ClientCache CLIENTS = new ClientCache();

  private static class Invoker implements InvocationHandler {
    private InetSocketAddress address;
@ -310,9 +312,8 @@ public class HBaseRPC {
|
|||
this.client = CLIENTS.getClient(conf, factory);
|
||||
}
|
||||
|
||||
public Object invoke(@SuppressWarnings("unused") Object proxy,
|
||||
Method method, Object[] args)
|
||||
throws Throwable {
|
||||
public Object invoke(Object proxy, Method method, Object[] args)
|
||||
throws Throwable {
|
||||
final boolean logDebug = LOG.isDebugEnabled();
|
||||
long startTime = 0;
|
||||
if (logDebug) {
|
||||
|
@ -328,7 +329,7 @@ public class HBaseRPC {
|
|||
}
|
||||
|
||||
/* close the IPC client that's responsible for this invoker's RPCs */
|
||||
synchronized private void close() {
|
||||
synchronized protected void close() {
|
||||
if (!isClosed) {
|
||||
isClosed = true;
|
||||
CLIENTS.stopClient(client);
|
||||
|
@ -468,10 +469,9 @@ public class HBaseRPC {
|
|||
clientVersion);
|
||||
if (serverVersion == clientVersion) {
|
||||
return proxy;
|
||||
} else {
|
||||
throw new VersionMismatch(protocol.getName(), clientVersion,
|
||||
serverVersion);
|
||||
}
|
||||
throw new VersionMismatch(protocol.getName(), clientVersion,
|
||||
serverVersion);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -657,11 +657,10 @@ public class HBaseRPC {
|
|||
Throwable target = e.getTargetException();
|
||||
if (target instanceof IOException) {
|
||||
throw (IOException)target;
|
||||
} else {
|
||||
IOException ioe = new IOException(target.toString());
|
||||
ioe.setStackTrace(target.getStackTrace());
|
||||
throw ioe;
|
||||
}
|
||||
IOException ioe = new IOException(target.toString());
|
||||
ioe.setStackTrace(target.getStackTrace());
|
||||
throw ioe;
|
||||
} catch (Throwable e) {
|
||||
IOException ioe = new IOException(e.toString());
|
||||
ioe.setStackTrace(e.getStackTrace());
|
||||
|
@@ -670,9 +669,10 @@
    }
  }

  private static void log(String value) {
    if (value!= null && value.length() > 55)
      value = value.substring(0, 55)+"...";
    LOG.info(value);
  protected static void log(String value) {
    String v = value;
    if (v != null && v.length() > 55)
      v = v.substring(0, 55)+"...";
    LOG.info(v);
  }
}

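The rewritten log() above no longer assigns to its method parameter; that is what Eclipse's parameter-assignment warning flags, and copying into a local first keeps the caller's argument conceptually untouched. A tiny sketch of the same pattern, with hypothetical names:

    final class LogUtil {
      private static final int MAX_LEN = 55;

      // Truncate a message for logging without ever writing to the parameter.
      static String abbreviate(String value) {
        String v = value;
        if (v != null && v.length() > MAX_LEN) {
          v = v.substring(0, MAX_LEN) + "...";
        }
        return v;
      }
    }
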
@@ -47,7 +47,7 @@ public class HBaseRpcMetrics implements Updater {
  private MetricsRecord metricsRecord;
  private static Log LOG = LogFactory.getLog(HBaseRpcMetrics.class);

  public HBaseRpcMetrics(String hostName, String port, HBaseServer server) {
  public HBaseRpcMetrics(String hostName, String port) {
    MetricsContext context = MetricsUtil.getContext("rpc");
    metricsRecord = MetricsUtil.createRecord(context, "metrics");

@@ -75,6 +75,7 @@ public class HBaseRpcMetrics implements Updater {

  /**
   * Push the metrics to the monitoring subsystem on doUpdate() call.
   * @param context
   */
  public void doUpdates(MetricsContext context) {
    rpcQueueTime.pushMetric(metricsRecord);

@ -86,12 +86,14 @@ public abstract class HBaseServer {
|
|||
public static final Log LOG =
|
||||
LogFactory.getLog("org.apache.hadoop.ipc.HBaseServer");
|
||||
|
||||
private static final ThreadLocal<HBaseServer> SERVER = new ThreadLocal<HBaseServer>();
|
||||
protected static final ThreadLocal<HBaseServer> SERVER = new ThreadLocal<HBaseServer>();
|
||||
|
||||
/** Returns the server instance called under or null. May be called under
|
||||
* {@link #call(Writable, long)} implementations, and under {@link Writable}
|
||||
* methods of paramters and return values. Permits applications to access
|
||||
* the server context.*/
|
||||
* the server context.
|
||||
* @return HBaseServer
|
||||
*/
|
||||
public static HBaseServer get() {
|
||||
return SERVER.get();
|
||||
}
|
||||
|
@ -99,10 +101,11 @@ public abstract class HBaseServer {
|
|||
/** This is set to Call object before Handler invokes an RPC and reset
|
||||
* after the call returns.
|
||||
*/
|
||||
private static final ThreadLocal<Call> CurCall = new ThreadLocal<Call>();
|
||||
protected static final ThreadLocal<Call> CurCall = new ThreadLocal<Call>();
|
||||
|
||||
/** Returns the remote side ip address when invoked inside an RPC
|
||||
* Returns null incase of an error.
|
||||
* @return InetAddress
|
||||
*/
|
||||
public static InetAddress getRemoteIp() {
|
||||
Call call = CurCall.get();
|
||||
|
@ -113,46 +116,47 @@ public abstract class HBaseServer {
|
|||
}
|
||||
/** Returns remote address as a string when invoked inside an RPC.
|
||||
* Returns null in case of an error.
|
||||
* @return String
|
||||
*/
|
||||
public static String getRemoteAddress() {
|
||||
InetAddress addr = getRemoteIp();
|
||||
return (addr == null) ? null : addr.getHostAddress();
|
||||
}
|
||||
|
||||
private String bindAddress;
|
||||
private int port; // port we listen on
|
||||
protected String bindAddress;
|
||||
protected int port; // port we listen on
|
||||
private int handlerCount; // number of handler threads
|
||||
private Class<? extends Writable> paramClass; // class of call parameters
|
||||
private int maxIdleTime; // the maximum idle time after
|
||||
protected Class<? extends Writable> paramClass; // class of call parameters
|
||||
protected int maxIdleTime; // the maximum idle time after
|
||||
// which a client may be disconnected
|
||||
private int thresholdIdleConnections; // the number of idle connections
|
||||
protected int thresholdIdleConnections; // the number of idle connections
|
||||
// after which we will start
|
||||
// cleaning up idle
|
||||
// connections
|
||||
int maxConnectionsToNuke; // the max number of
|
||||
// connections to nuke
|
||||
//during a cleanup
|
||||
// during a cleanup
|
||||
|
||||
protected HBaseRpcMetrics rpcMetrics;
|
||||
|
||||
private Configuration conf;
|
||||
protected Configuration conf;
|
||||
|
||||
private int maxQueueSize;
|
||||
private int socketSendBufferSize;
|
||||
private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
|
||||
protected int socketSendBufferSize;
|
||||
protected final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
|
||||
|
||||
volatile private boolean running = true; // true while server runs
|
||||
private BlockingQueue<Call> callQueue; // queued calls
|
||||
volatile protected boolean running = true; // true while server runs
|
||||
protected BlockingQueue<Call> callQueue; // queued calls
|
||||
|
||||
private List<Connection> connectionList =
|
||||
protected List<Connection> connectionList =
|
||||
Collections.synchronizedList(new LinkedList<Connection>());
|
||||
//maintain a list
|
||||
//of client connections
|
||||
private Listener listener = null;
|
||||
private Responder responder = null;
|
||||
private int numConnections = 0;
|
||||
protected Responder responder = null;
|
||||
protected int numConnections = 0;
|
||||
private Handler[] handlers = null;
|
||||
private HBaseRPCErrorHandler errorHandler = null;
|
||||
protected HBaseRPCErrorHandler errorHandler = null;
|
||||
|
||||
/**
|
||||
* A convenience method to bind to a given address and report
|
||||
|
@ -179,20 +183,19 @@ public abstract class HBaseServer {
|
|||
if ("Unresolved address".equals(e.getMessage())) {
|
||||
throw new UnknownHostException("Invalid hostname for server: " +
|
||||
address.getHostName());
|
||||
} else {
|
||||
throw e;
|
||||
}
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
/** A call queued for handling. */
|
||||
private static class Call {
|
||||
private int id; // the client's call id
|
||||
private Writable param; // the parameter passed
|
||||
private Connection connection; // connection to client
|
||||
private long timestamp; // the time received when response is null
|
||||
protected int id; // the client's call id
|
||||
protected Writable param; // the parameter passed
|
||||
protected Connection connection; // connection to client
|
||||
protected long timestamp; // the time received when response is null
|
||||
// the time served when response is not null
|
||||
private ByteBuffer response; // the response for this call
|
||||
protected ByteBuffer response; // the response for this call
|
||||
|
||||
public Call(int id, Writable param, Connection connection) {
|
||||
this.id = id;
|
||||
|
@ -317,7 +320,7 @@ public abstract class HBaseServer {
|
|||
if (errorHandler != null) {
|
||||
if (errorHandler.checkOOME(e)) {
|
||||
LOG.info(getName() + ": exiting on OOME");
|
||||
closeCurrentConnection(key, e);
|
||||
closeCurrentConnection(key);
|
||||
cleanupConnections(true);
|
||||
return;
|
||||
}
|
||||
|
@ -326,7 +329,7 @@ public abstract class HBaseServer {
|
|||
// log the event and sleep for a minute and give
|
||||
// some thread(s) a chance to finish
|
||||
LOG.warn("Out of Memory in server select", e);
|
||||
closeCurrentConnection(key, e);
|
||||
closeCurrentConnection(key);
|
||||
cleanupConnections(true);
|
||||
try { Thread.sleep(60000); } catch (Exception ie) {}
|
||||
}
|
||||
|
@ -336,7 +339,7 @@ public abstract class HBaseServer {
|
|||
StringUtils.stringifyException(e));
|
||||
}
|
||||
} catch (Exception e) {
|
||||
closeCurrentConnection(key, e);
|
||||
closeCurrentConnection(key);
|
||||
}
|
||||
cleanupConnections(false);
|
||||
}
|
||||
|
@ -358,7 +361,7 @@ public abstract class HBaseServer {
|
|||
}
|
||||
}
|
||||
|
||||
private void closeCurrentConnection(SelectionKey key, Throwable e) {
|
||||
private void closeCurrentConnection(SelectionKey key) {
|
||||
if (key != null) {
|
||||
Connection c = (Connection)key.attachment();
|
||||
if (c != null) {
|
||||
|
@ -385,7 +388,7 @@ public abstract class HBaseServer {
|
|||
channel.configureBlocking(false);
|
||||
channel.socket().setTcpNoDelay(tcpNoDelay);
|
||||
SelectionKey readKey = channel.register(selector, SelectionKey.OP_READ);
|
||||
c = new Connection(readKey, channel, System.currentTimeMillis());
|
||||
c = new Connection(channel, System.currentTimeMillis());
|
||||
readKey.attach(c);
|
||||
synchronized (connectionList) {
|
||||
connectionList.add(numConnections, c);
|
||||
|
@ -504,11 +507,7 @@ public abstract class HBaseServer {
|
|||
}
|
||||
|
||||
for(Call call : calls) {
|
||||
try {
|
||||
doPurge(call, now);
|
||||
} catch (IOException e) {
|
||||
LOG.warn("Error in purging old calls " + e);
|
||||
}
|
||||
doPurge(call, now);
|
||||
}
|
||||
} catch (OutOfMemoryError e) {
|
||||
if (errorHandler != null) {
|
||||
|
@ -562,14 +561,14 @@ public abstract class HBaseServer {
|
|||
// Remove calls that have been pending in the responseQueue
|
||||
// for a long time.
|
||||
//
|
||||
private void doPurge(Call call, long now) throws IOException {
|
||||
private void doPurge(Call call, long now) {
|
||||
LinkedList<Call> responseQueue = call.connection.responseQueue;
|
||||
synchronized (responseQueue) {
|
||||
Iterator<Call> iter = responseQueue.listIterator(0);
|
||||
while (iter.hasNext()) {
|
||||
call = iter.next();
|
||||
if (now > call.timestamp + PURGE_INTERVAL) {
|
||||
closeConnection(call.connection);
|
||||
Call nextCall = iter.next();
|
||||
if (now > nextCall.timestamp + PURGE_INTERVAL) {
|
||||
closeConnection(nextCall.connection);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -698,22 +697,21 @@ public abstract class HBaseServer {
|
|||
//version are read
|
||||
private boolean headerRead = false; //if the connection header that
|
||||
//follows version is read.
|
||||
private SocketChannel channel;
|
||||
protected SocketChannel channel;
|
||||
private ByteBuffer data;
|
||||
private ByteBuffer dataLengthBuffer;
|
||||
private LinkedList<Call> responseQueue;
|
||||
protected LinkedList<Call> responseQueue;
|
||||
private volatile int rpcCount = 0; // number of outstanding rpcs
|
||||
private long lastContact;
|
||||
private int dataLength;
|
||||
private Socket socket;
|
||||
protected Socket socket;
|
||||
// Cache the remote host & port info so that even if the socket is
|
||||
// disconnected, we can say where it used to connect to.
|
||||
private String hostAddress;
|
||||
private int remotePort;
|
||||
private UserGroupInformation ticket = null;
|
||||
protected UserGroupInformation ticket = null;
|
||||
|
||||
public Connection(SelectionKey key, SocketChannel channel,
|
||||
long lastContact) {
|
||||
public Connection(SocketChannel channel, long lastContact) {
|
||||
this.channel = channel;
|
||||
this.lastContact = lastContact;
|
||||
this.data = null;
|
||||
|
@ -760,7 +758,7 @@ public abstract class HBaseServer {
|
|||
}
|
||||
|
||||
/* Decrement the outstanding RPC count */
|
||||
private void decRpcCount() {
|
||||
protected void decRpcCount() {
|
||||
rpcCount--;
|
||||
}
|
||||
|
||||
|
@ -769,7 +767,7 @@ public abstract class HBaseServer {
|
|||
rpcCount++;
|
||||
}
|
||||
|
||||
private boolean timedOut(long currentTime) {
|
||||
protected boolean timedOut(long currentTime) {
|
||||
if (isIdle() && currentTime - lastContact > maxIdleTime)
|
||||
return true;
|
||||
return false;
|
||||
|
@ -831,12 +829,11 @@ public abstract class HBaseServer {
|
|||
processData();
|
||||
data = null;
|
||||
return count;
|
||||
} else {
|
||||
processHeader();
|
||||
headerRead = true;
|
||||
data = null;
|
||||
continue;
|
||||
}
|
||||
processHeader();
|
||||
headerRead = true;
|
||||
data = null;
|
||||
continue;
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
@ -867,7 +864,7 @@ public abstract class HBaseServer {
|
|||
callQueue.put(call); // queue the call; maybe blocked here
|
||||
}
|
||||
|
||||
private synchronized void close() throws IOException {
|
||||
protected synchronized void close() {
|
||||
data = null;
|
||||
dataLengthBuffer = null;
|
||||
if (!channel.isOpen())
|
||||
|
@ -995,30 +992,28 @@ public abstract class HBaseServer {
|
|||
listener = new Listener();
|
||||
this.port = listener.getAddress().getPort();
|
||||
this.rpcMetrics = new HBaseRpcMetrics(serverName,
|
||||
Integer.toString(this.port), this);
|
||||
Integer.toString(this.port));
|
||||
this.tcpNoDelay = conf.getBoolean("ipc.server.tcpnodelay", false);
|
||||
|
||||
|
||||
// Create the responder here
|
||||
responder = new Responder();
|
||||
}
|
||||
|
||||
private void closeConnection(Connection connection) {
|
||||
protected void closeConnection(Connection connection) {
|
||||
synchronized (connectionList) {
|
||||
if (connectionList.remove(connection))
|
||||
numConnections--;
|
||||
}
|
||||
try {
|
||||
connection.close();
|
||||
} catch (IOException e) {
|
||||
}
|
||||
connection.close();
|
||||
}
|
||||
|
||||
/** Sets the socket buffer size used for responding to RPCs */
|
||||
/** Sets the socket buffer size used for responding to RPCs.
|
||||
* @param size
|
||||
*/
|
||||
public void setSocketSendBufSize(int size) { this.socketSendBufferSize = size; }
|
||||
|
||||
/** Starts the service. Must be called before any calls will be handled. */
|
||||
public synchronized void start() throws IOException {
|
||||
public synchronized void start() {
|
||||
responder.start();
|
||||
listener.start();
|
||||
handlers = new Handler[handlerCount];
|
||||
|
@ -1052,6 +1047,7 @@ public abstract class HBaseServer {
|
|||
/** Wait for the server to be stopped.
|
||||
* Does not wait for all subthreads to finish.
|
||||
* See {@link #stop()}.
|
||||
* @throws InterruptedException
|
||||
*/
|
||||
public synchronized void join() throws InterruptedException {
|
||||
while (running) {
|
||||
|
@ -1067,11 +1063,15 @@ public abstract class HBaseServer {
|
|||
return listener.getAddress();
|
||||
}
|
||||
|
||||
/** Called for each call. */
|
||||
/** Called for each call.
|
||||
* @param param
|
||||
* @param receiveTime
|
||||
* @return Writable
|
||||
* @throws IOException
|
||||
*/
|
||||
public abstract Writable call(Writable param, long receiveTime)
|
||||
throws IOException;
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* The number of open RPC conections
|
||||
* @return the number of open rpc connections
|
||||
|
@@ -1113,14 +1113,12 @@
   *
   * @see WritableByteChannel#write(ByteBuffer)
   */
  private static int channelWrite(WritableByteChannel channel,
    ByteBuffer buffer) throws IOException {

  protected static int channelWrite(WritableByteChannel channel,
    ByteBuffer buffer) throws IOException {
    return (buffer.remaining() <= NIO_BUFFER_LIMIT) ?
      channel.write(buffer) : channelIO(null, channel, buffer);
  }

  /**
   * This is a wrapper around {@link ReadableByteChannel#read(ByteBuffer)}.
   * If the amount of data is large, it writes to channel in smaller chunks.

@@ -1129,13 +1127,12 @@
   *
   * @see ReadableByteChannel#read(ByteBuffer)
   */
  private static int channelRead(ReadableByteChannel channel,
    ByteBuffer buffer) throws IOException {

  protected static int channelRead(ReadableByteChannel channel,
    ByteBuffer buffer) throws IOException {
    return (buffer.remaining() <= NIO_BUFFER_LIMIT) ?
      channel.read(buffer) : channelIO(channel, null, buffer);
  }

  /**
   * Helper for {@link #channelRead(ReadableByteChannel, ByteBuffer)}
   * and {@link #channelWrite(WritableByteChannel, ByteBuffer)}. Only
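channelWrite() and channelRead() above cap single NIO calls at NIO_BUFFER_LIMIT and hand anything larger to a chunking helper, which keeps the JDK from allocating a huge temporary direct buffer per call. A sketch of the chunking idea, assuming a hypothetical limit and class name:

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.WritableByteChannel;

    final class ChunkedChannelWriter {
      // Hypothetical limit; the real constant lives elsewhere in the server.
      private static final int LIMIT = 8 * 1024;

      // Write the buffer in slices of at most LIMIT bytes; returns bytes written.
      static int write(WritableByteChannel channel, ByteBuffer buffer)
          throws IOException {
        if (buffer.remaining() <= LIMIT) {
          return channel.write(buffer);
        }
        int written = 0;
        final int originalLimit = buffer.limit();
        try {
          while (buffer.position() < originalLimit) {
            buffer.limit(Math.min(buffer.position() + LIMIT, originalLimit));
            int n = channel.write(buffer);   // writes at most one slice
            if (n <= 0) {
              break;                         // channel not ready for more
            }
            written += n;
          }
        } finally {
          buffer.limit(originalLimit);       // restore the caller's limit
        }
        return written;
      }
    }
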
@@ -86,6 +86,11 @@ implements TableMap<ImmutableBytesWritable,RowResult> {
   *
   * Pass the new key and value to reduce.
   * If any of the grouping columns are not found in the value, the record is skipped.
   * @param key
   * @param value
   * @param output
   * @param reporter
   * @throws IOException
   */
  public void map(ImmutableBytesWritable key, RowResult value,
    OutputCollector<ImmutableBytesWritable,RowResult> output,

@@ -59,6 +59,11 @@ implements TableMap<ImmutableBytesWritable, RowResult> {

  /**
   * Pass the key, value to reduce
   * @param key
   * @param value
   * @param output
   * @param reporter
   * @throws IOException
   */
  public void map(ImmutableBytesWritable key, RowResult value,
    OutputCollector<ImmutableBytesWritable,RowResult> output,

@@ -42,6 +42,11 @@ implements TableReduce<ImmutableBytesWritable, BatchUpdate> {

  /**
   * No aggregation, output pairs of (key, record)
   * @param key
   * @param values
   * @param output
   * @param reporter
   * @throws IOException
   */
  public void reduce(ImmutableBytesWritable key, Iterator<BatchUpdate> values,
    OutputCollector<ImmutableBytesWritable, BatchUpdate> output,

@@ -45,6 +45,8 @@ public class IndexOutputFormat extends
FileOutputFormat<ImmutableBytesWritable, LuceneDocumentWrapper> {
  static final Log LOG = LogFactory.getLog(IndexOutputFormat.class);

  private Random random = new Random();

  @Override
  public RecordWriter<ImmutableBytesWritable, LuceneDocumentWrapper>
    getRecordWriter(final FileSystem fs, JobConf job, String name,

@@ -53,7 +55,7 @@ public class IndexOutputFormat extends

    final Path perm = new Path(FileOutputFormat.getOutputPath(job), name);
    final Path temp = job.getLocalPath("index/_"
      + Integer.toString(new Random().nextInt()));
      + Integer.toString(random.nextInt()));

    LOG.info("To index into " + perm);

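The IndexOutputFormat hunk swaps a per-call new Random() for a single reused field; constructing a fresh generator for every record writer is needless allocation when one instance serves the whole format. A minimal sketch of the pattern, with hypothetical names:

    import java.util.Random;

    final class TempNames {
      // One generator for the lifetime of the object, not one per call.
      private final Random random = new Random();

      String next(String prefix) {
        return prefix + "_" + Integer.toString(random.nextInt());
      }
    }
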
@@ -82,7 +82,6 @@ implements TableMap<ImmutableBytesWritable, RowResult>, Tool {
   * @return the JobConf
   * @throws IOException
   */
  @SuppressWarnings("unused")
  public JobConf createSubmittableJob(String[] args) throws IOException {
    JobConf c = new JobConf(getConf(), RowCounter.class);
    c.setJobName(NAME);

@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.filter.StopRowFilter;
import org.apache.hadoop.hbase.filter.WhileMatchRowFilter;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;

@@ -50,7 +50,7 @@ FileOutputFormat<ImmutableBytesWritable, BatchUpdate> {
 * Convert Reduce output (key, value) to (HStoreKey, KeyedDataArrayWritable)
 * and write to an HBase table
 */
protected class TableRecordWriter
protected static class TableRecordWriter
  implements RecordWriter<ImmutableBytesWritable, BatchUpdate> {
  private HTable m_table;

@@ -110,7 +110,7 @@ abstract class BaseScanner extends Chore implements HConstants {

  // will use this variable to synchronize and make sure we aren't interrupted
  // mid-scan
  final Integer scannerLock = new Integer(0);
  final Object scannerLock = new Object();

  BaseScanner(final HMaster master, final boolean rootRegion, final int period,
    final AtomicBoolean stop) {

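Replacing new Integer(0) with new Object() as the scanner lock avoids synchronizing on a boxed primitive: if such code ever drifts toward autoboxing or Integer.valueOf, unrelated classes can end up sharing the same cached Integer and contending on one monitor, and the intent is muddier in any case. A dedicated lock object is unambiguous. A small sketch:

    final class Scanner {
      // A dedicated monitor object: nothing else in the JVM can alias it.
      private final Object scannerLock = new Object();

      void scan() {
        synchronized (scannerLock) {
          // ... work that must not be interrupted mid-scan ...
        }
      }
    }
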
@@ -38,8 +38,7 @@ abstract class ColumnOperation extends TableOperation {
  }

  @Override
  protected void processScanItem(@SuppressWarnings("unused") String serverName,
    final HRegionInfo info)
  protected void processScanItem(String serverName, final HRegionInfo info)
  throws IOException {
    if (isEnabled(info)) {
      throw new TableNotDisabledException(tableName);

@@ -96,8 +96,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,

  static final Log LOG = LogFactory.getLog(HMaster.class.getName());

  public long getProtocolVersion(@SuppressWarnings("unused") String protocol,
    @SuppressWarnings("unused") long clientVersion) {
  public long getProtocolVersion(String protocol, long clientVersion) {
    return HBaseRPCProtocolVersion.versionID;
  }

@@ -19,6 +19,8 @@
 */
package org.apache.hadoop.hbase.master;

import java.util.Arrays;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.util.Bytes;

@@ -76,8 +78,8 @@ public class MetaRegion implements Comparable<MetaRegion> {

  @Override
  public int hashCode() {
    int result = this.regionName.hashCode();
    result ^= this.startKey.hashCode();
    int result = Arrays.hashCode(this.regionName);
    result ^= Arrays.hashCode(this.startKey);
    return result;
  }

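The MetaRegion change matters for correctness, not just warnings: an array's own hashCode() is identity-based, so two MetaRegions built from equal byte[] contents would have hashed differently. Arrays.hashCode (with Arrays.equals on the equals side) works on contents. A brief sketch of the pairing, with a hypothetical class:

    import java.util.Arrays;

    final class RegionKey {
      private final byte[] regionName;
      private final byte[] startKey;

      RegionKey(byte[] regionName, byte[] startKey) {
        this.regionName = regionName.clone();
        this.startKey = startKey.clone();
      }

      @Override
      public boolean equals(Object o) {
        if (!(o instanceof RegionKey)) {
          return false;
        }
        RegionKey other = (RegionKey) o;
        return Arrays.equals(regionName, other.regionName)
            && Arrays.equals(startKey, other.startKey);
      }

      @Override
      public int hashCode() {
        // Content-based, so equal keys always land in the same hash bucket.
        return Arrays.hashCode(regionName) ^ Arrays.hashCode(startKey);
      }
    }
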
@@ -57,10 +57,10 @@ class ModifyTableMeta extends TableOperation {
  }

  @Override
  protected void processScanItem(@SuppressWarnings("unused") String serverName,
  protected void processScanItem(String serverName,
    final HRegionInfo info) throws IOException {
    if (isEnabled(info)) {
      throw new TableNotDisabledException(tableName.toString());
      throw new TableNotDisabledException(Bytes.toString(tableName));
    }
  }

@@ -42,7 +42,6 @@ class ProcessRegionOpen extends ProcessRegionStatusChange {
   * @param regionInfo
   * @throws IOException
   */
  @SuppressWarnings("unused")
  public ProcessRegionOpen(HMaster master, HServerInfo info,
    HRegionInfo regionInfo)
  throws IOException {

@@ -51,7 +51,7 @@ class ProcessServerShutdown extends RegionServerOperation {
  private boolean rootRescanned;


  private class ToDoEntry {
  private static class ToDoEntry {
    boolean regionOffline;
    final byte [] row;
    final HRegionInfo info;

@ -1315,6 +1315,12 @@ class RegionManager implements HConstants {
|
|||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) {
|
||||
return true;
|
||||
}
|
||||
if (o == null || getClass() != o.getClass()) {
|
||||
return false;
|
||||
}
|
||||
return this.compareTo((RegionState) o) == 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler;
|
|||
import org.apache.hadoop.hbase.TableNotFoundException;
|
||||
import org.apache.hadoop.hbase.TableNotDisabledException;
|
||||
import org.apache.hadoop.hbase.ipc.HRegionInterface;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
import org.apache.hadoop.hbase.util.Sleeper;
|
||||
|
||||
/**
|
||||
|
@ -75,8 +76,8 @@ abstract class RetryableMetaOperation<T> implements Callable<T> {
|
|||
if (LOG.isDebugEnabled()) {
|
||||
StringBuilder message = new StringBuilder(
|
||||
"Trying to contact region server for regionName '" +
|
||||
m.getRegionName() + "', but failed after " + (tries + 1) +
|
||||
" attempts.\n");
|
||||
Bytes.toString(m.getRegionName()) + "', but failed after " +
|
||||
(tries + 1) + " attempts.\n");
|
||||
int i = 1;
|
||||
for (IOException e2 : exceptions) {
|
||||
message.append("Exception " + i + ":\n" + e2);
|
||||
|
|
|
@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.HConstants;
|
|||
import org.apache.hadoop.hbase.HRegionLocation;
|
||||
import org.apache.hadoop.hbase.Leases;
|
||||
import org.apache.hadoop.hbase.HMsg.Type;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
|
||||
import org.apache.zookeeper.WatchedEvent;
|
||||
import org.apache.zookeeper.Watcher;
|
||||
|
@ -76,7 +77,7 @@ class ServerManager implements HConstants {
|
|||
* and it's server logs are recovered, it will be told to call server startup
|
||||
* because by then, its regions have probably been reassigned.
|
||||
*/
|
||||
private final Set<String> deadServers =
|
||||
protected final Set<String> deadServers =
|
||||
Collections.synchronizedSet(new HashSet<String>());
|
||||
|
||||
/** SortedMap server load -> Set of server names */
|
||||
|
@ -87,7 +88,7 @@ class ServerManager implements HConstants {
|
|||
final Map<String, HServerLoad> serversToLoad =
|
||||
new ConcurrentHashMap<String, HServerLoad>();
|
||||
|
||||
private HMaster master;
|
||||
protected HMaster master;
|
||||
|
||||
// Last time we logged average load.
|
||||
private volatile long lastLogOfAverageLaod = 0;
|
||||
|
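Note on the private-to-protected changes (deadServers and master above, and several fields in the regionserver code below): when an outer class's private member is read or written from a nested or anonymous class, javac emits package-private synthetic accessor methods so the nested code can reach it, which is exactly the performance-related Eclipse warning this issue cleans up. Widening the field to package-private or protected lets the nested code touch it directly. A minimal sketch with illustrative names:

    public class ServerTracker {
      // If this were private and touched from Monitor below, the compiler would
      // generate a synthetic accessor; protected (or package-private) avoids it.
      protected int deadServerCount;

      class Monitor implements Runnable {
        public void run() {
          deadServerCount++;   // direct access, no synthetic bridge method
        }
      }
    }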
@@ -490,7 +491,7 @@ class ServerManager implements HConstants {
     if (duplicateAssignment) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("region server " + serverInfo.getServerAddress().toString()
-          + " should not have opened region " + region.getRegionName());
+          + " should not have opened region " + Bytes.toString(region.getRegionName()));
       }

       // This Region should not have been opened.

@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.Bytes;

 /**
  * Instantiated to delete a table. Table must be offline.

@@ -41,7 +42,7 @@ class TableDelete extends TableOperation {
   }

   @Override
-  protected void processScanItem(@SuppressWarnings("unused") String serverName,
+  protected void processScanItem(String serverName,
     final HRegionInfo info) throws IOException {

     if (isEnabled(info)) {

@@ -59,12 +60,12 @@ class TableDelete extends TableOperation {
         HRegion.deleteRegion(this.master.fs, this.master.rootdir, i);

       } catch (IOException e) {
-        LOG.error("failed to delete region " + i.getRegionName(),
+        LOG.error("failed to delete region " + Bytes.toString(i.getRegionName()),
           RemoteExceptionHandler.checkIOException(e));
       }
     }

     // delete the table's folder from fs.
-    master.fs.delete(new Path(master.rootdir, tableName.toString()), true);
+    master.fs.delete(new Path(master.rootdir, Bytes.toString(tableName)), true);
   }
 }

@@ -89,7 +89,8 @@ abstract class TableOperation implements HConstants {
         HRegionInfo info = this.master.getHRegionInfo(values.getRow(), values);
         if (info == null) {
           emptyRows.add(values.getRow());
-          LOG.error(COL_REGIONINFO + " not found on " + values.getRow());
+          LOG.error(Bytes.toString(COL_REGIONINFO) + " not found on " +
+            Bytes.toString(values.getRow()));
           continue;
         }
         String serverAddress = Writables.cellToString(values.get(COL_SERVER));

@@ -125,7 +126,7 @@ abstract class TableOperation implements HConstants {
       if (emptyRows.size() > 0) {
         LOG.warn("Found " + emptyRows.size() +
           " rows with empty HRegionInfo while scanning meta region " +
-          m.getRegionName());
+          Bytes.toString(m.getRegionName()));
         master.deleteEmptyMetaRows(server, m.getRegionName(), emptyRows);
       }

@@ -61,6 +61,7 @@ public class MasterMetrics implements Updater {
   /**
    * Since this object is a registered updater, this method will be called
    * periodically, e.g. every 5 seconds.
+   * @param unused
    */
   public void doUpdates(MetricsContext unused) {
     synchronized (this) {

@@ -53,7 +53,7 @@ public class MetricsRate {
     long now = System.currentTimeMillis();
     long diff = (now-ts)/1000;
     if (diff == 0) diff = 1; // sigh this is crap.
-    this.prevRate = value / diff;
+    this.prevRate = (float)value / diff;
     this.value = 0;
     this.ts = now;
   }
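Note on the MetricsRate fix: assuming value is an integral counter and diff holds whole seconds (consistent with the surrounding lines), value / diff is integer division, so any rate below one event per second silently becomes 0 before it is stored in the float prevRate. Casting the dividend to float first preserves the fraction:

    public class RateDemo {
      public static void main(String[] args) {
        int value = 3;     // events counted during the interval
        long diff = 10;    // interval length in seconds

        float truncated = value / diff;        // integer division happens first: 0.0
        float kept = (float) value / diff;     // promote before dividing: 0.3

        System.out.println(truncated + " vs " + kept);
      }
    }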
@@ -122,7 +122,7 @@ public class HLog implements HConstants, Syncable {

   private volatile boolean closed = false;

-  private final Integer sequenceLock = new Integer(0);
+  private final Object sequenceLock = new Object();
   private volatile long logSeqNum = 0;

   private volatile long filenum = 0;

@@ -136,7 +136,7 @@ public class HLog implements HConstants, Syncable {

   // We synchronize on updateLock to prevent updates and to prevent a log roll
   // during an update
-  private final Integer updateLock = new Integer(0);
+  private final Object updateLock = new Object();

   /*
    * If more than this many logs, force flush of oldest region to oldest edit

@@ -61,6 +61,8 @@ public class HLogEdit implements Writable, HConstants {

   /**
+   * @param value
+   * @param offset
+   * @param length
    * @return True if an entry and its content is {@link #DELETED_BYTES}.
    */
   public static boolean isDeleted(final byte [] value, final int offset,

@@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.*;

 import java.io.*;
+import java.util.Arrays;

 /**
  * A Key for an entry in the change log.

@@ -94,13 +95,19 @@ public class HLogKey implements WritableComparable<HLogKey> {

   @Override
   public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null || getClass() != obj.getClass()) {
+      return false;
+    }
     return compareTo((HLogKey)obj) == 0;
   }

   @Override
   public int hashCode() {
-    int result = this.regionName.hashCode();
-    result ^= this.row.hashCode();
+    int result = Arrays.hashCode(this.regionName);
+    result ^= Arrays.hashCode(this.row);
     result ^= this.logSeqNum;
     return result;
   }
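Note on the HLogKey changes: equals delegates to compareTo, so the added guards make it tolerate null and foreign types before the cast, and the hashCode rewrite keeps equals and hashCode consistent now that the byte[] fields are compared by content. A compact, self-contained version of the pattern; the field names follow the diff, but the compareTo body here is an assumption for illustration only:

    import java.util.Arrays;

    public class LogKey implements Comparable<LogKey> {
      private final byte[] regionName;
      private final byte[] row;
      private final long logSeqNum;

      LogKey(byte[] regionName, byte[] row, long logSeqNum) {
        this.regionName = regionName;
        this.row = row;
        this.logSeqNum = logSeqNum;
      }

      public int compareTo(LogKey other) {
        int c = compare(this.regionName, other.regionName);
        if (c == 0) c = compare(this.row, other.row);
        if (c == 0) c = this.logSeqNum < other.logSeqNum ? -1
            : (this.logSeqNum > other.logSeqNum ? 1 : 0);
        return c;
      }

      private static int compare(byte[] a, byte[] b) {
        for (int i = 0; i < Math.min(a.length, b.length); i++) {
          int d = (a[i] & 0xff) - (b[i] & 0xff);
          if (d != 0) return d;
        }
        return a.length - b.length;
      }

      @Override
      public boolean equals(Object obj) {
        if (this == obj) return true;
        if (obj == null || getClass() != obj.getClass()) return false; // guard before the cast
        return compareTo((LogKey) obj) == 0;
      }

      @Override
      public int hashCode() {
        int result = Arrays.hashCode(this.regionName); // content hash, not identity
        result ^= Arrays.hashCode(this.row);
        result ^= this.logSeqNum;                      // compound ^= narrows back to int
        return result;
      }
    }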
@@ -192,7 +192,7 @@ public class HRegion implements HConstants {
   // Stop updates lock
   private final ReentrantReadWriteLock updatesLock =
     new ReentrantReadWriteLock();
-  private final Integer splitLock = new Integer(0);
+  private final Object splitLock = new Object();
   private long minSequenceId;
   final AtomicInteger activeScannerCount = new AtomicInteger(0);

@@ -2474,7 +2474,8 @@ public class HRegion implements HConstants {

     LOG.info("starting merge of regions: " + a + " and " + b +
       " into new region " + newRegionInfo.toString() +
-      " with start key <" + startKey + "> and end key <" + endKey + ">");
+      " with start key <" + Bytes.toString(startKey) + "> and end key <" +
+      Bytes.toString(endKey) + ">");

     // Move HStoreFiles under new region directory

@@ -2665,7 +2666,7 @@ public class HRegion implements HConstants {
   private byte [] binaryIncrement(byte [] value, long amount) {
     for(int i=0;i<value.length;i++) {
       int cur = (int)(amount >> (8 * i)) % 256;
-      int val = (int)(value[value.length-i-1] & 0xff);
+      int val = value[value.length-i-1] & 0xff;
       int total = cur + val;
       if(total > 255) {
         amount += ((long)256 << (8 * i));

@@ -136,7 +136,7 @@ public class HRegionServer implements HConstants, HRegionInterface, HBaseRPCErro
   protected final HBaseConfiguration conf;

   private final ServerConnection connection;
-  private final AtomicBoolean haveRootRegion = new AtomicBoolean(false);
+  protected final AtomicBoolean haveRootRegion = new AtomicBoolean(false);
   private FileSystem fs;
   private Path rootDir;
   private final Random rand = new Random();

@@ -757,6 +757,8 @@ public class HRegionServer implements HConstants, HRegionInterface, HBaseRPCErro
    * Thread for toggling safemode after some configurable interval.
    */
   private class CompactionLimitThread extends Thread {
+    protected CompactionLimitThread() {}
+
     @Override
     public void run() {
       // First wait until we exit safe mode

@@ -1233,8 +1235,8 @@ public class HRegionServer implements HConstants, HRegionInterface, HBaseRPCErro
    * Data structure to hold a HMsg and retries count.
    */
   private static class ToDoEntry {
-    private int tries;
-    private final HMsg msg;
+    protected int tries;
+    protected final HMsg msg;
     ToDoEntry(HMsg msg) {
       this.tries = 0;
       this.msg = msg;

@@ -1406,14 +1408,14 @@ public class HRegionServer implements HConstants, HRegionInterface, HBaseRPCErro
     return r;
   }

-  /*
+  /**
    * Add a MSG_REPORT_PROCESS_OPEN to the outbound queue.
    * This method is called while region is in the queue of regions to process
    * and then while the region is being opened, it is called from the Worker
    * thread that is running the region open.
    * @param hri Region to add the message for
    */
-  protected void addProcessingMessage(final HRegionInfo hri) {
+  public void addProcessingMessage(final HRegionInfo hri) {
     getOutboundMsgs().add(new HMsg(HMsg.Type.MSG_REPORT_PROCESS_OPEN, hri));
   }

@@ -84,7 +84,8 @@ class Memcache {
   /**
    * Constructor.
    * @param ttl The TTL for cache entries, in milliseconds.
-   * @param regionInfo The HRI for this cache
+   * @param c
+   * @param rc
    */
   public Memcache(final long ttl, final Comparator<HStoreKey> c,
     final HStoreKey.StoreKeyComparator rc) {

@@ -454,7 +455,7 @@ class Memcache {
       if (Store.notExpiredAndNotInDeletes(this.ttl,
           found_key, now, deletes)) {
         candidateKeys.put(stripTimestamp(found_key),
-          new Long(found_key.getTimestamp()));
+          Long.valueOf(found_key.getTimestamp()));
       } else {
         if (deletedOrExpiredRow == null) {
           deletedOrExpiredRow = new HStoreKey(found_key);

@@ -523,7 +524,7 @@ class Memcache {
       if (Store.notExpiredAndNotInDeletes(this.ttl, found_key, now, deletes)) {
         lastRowFound = found_key.getRow();
         candidateKeys.put(stripTimestamp(found_key),
-          new Long(found_key.getTimestamp()));
+          Long.valueOf(found_key.getTimestamp()));
       } else {
         expires.add(found_key);
         if (LOG.isDebugEnabled()) {
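Note on the new Long(...) to Long.valueOf(...) changes in Memcache (and the matching Integer change in ScannerModel further down): the constructor always allocates a fresh wrapper, while the valueOf factory may return a cached instance (and must for small values), so it is the cheaper way to box. For example:

    public class BoxingDemo {
      public static void main(String[] args) {
        long timestamp = 42L;

        Long viaNew = new Long(timestamp);          // always a new allocation
        Long viaValueOf = Long.valueOf(timestamp);  // may come from the Long cache

        Long again = Long.valueOf(timestamp);
        System.out.println(viaValueOf == again);    // true: small values are cached
        System.out.println(viaNew == again);        // false: distinct instance
        System.out.println(viaNew.equals(again));   // value equality still holds
      }
    }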
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.DroppedSnapshotException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.StringUtils;

 /**

@@ -144,7 +145,7 @@ class MemcacheFlusher extends Thread implements FlushRequester {
         continue;
       } catch (Exception ex) {
         LOG.error("Cache flush failed" +
-          (r != null ? (" for region " + r.getRegionName()) : ""),
+          (r != null ? (" for region " + Bytes.toString(r.getRegionName())) : ""),
           ex);
         if (!server.checkFileSystem()) {
           break;

@@ -239,7 +240,7 @@ class MemcacheFlusher extends Thread implements FlushRequester {
       return false;
     } catch (IOException ex) {
       LOG.error("Cache flush failed"
-        + (region != null ? (" for region " + region.getRegionName()) : ""),
+        + (region != null ? (" for region " + Bytes.toString(region.getRegionName())) : ""),
         RemoteExceptionHandler.checkIOException(ex));
       if (!server.checkFileSystem()) {
         return false;

@@ -269,7 +270,7 @@ class MemcacheFlusher extends Thread implements FlushRequester {
   private synchronized void flushSomeRegions() {
     // keep flushing until we hit the low water mark
     long globalMemcacheSize = -1;
-    ArrayList<HRegion> regionsToCompact = new ArrayList();
+    ArrayList<HRegion> regionsToCompact = new ArrayList<HRegion>();
     for (SortedMap<Long, HRegion> m =
         this.server.getCopyOfOnlineRegionsSortedBySize();
       (globalMemcacheSize = server.getGlobalMemcacheSize()) >=
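Note on the flushSomeRegions change: the right-hand side was a raw ArrayList, which produces an unchecked-conversion warning and lets type errors slip through to runtime; parameterizing it matches the declared ArrayList<HRegion> and moves the check to compile time. For instance:

    import java.util.ArrayList;

    public class RawTypeDemo {
      public static void main(String[] args) {
        // Raw type: compiles with an unchecked warning and accepts anything.
        ArrayList raw = new ArrayList();
        raw.add("not a region");

        // Parameterized type: the same mistake would not compile.
        ArrayList<Integer> typed = new ArrayList<Integer>();
        typed.add(1);
        // typed.add("not a region");  // compile-time error

        System.out.println(raw.size() + " raw, " + typed.size() + " typed");
      }
    }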
@@ -102,7 +102,7 @@ public class Store implements HConstants {
   private int maxFilesToCompact;
   private final long desiredMaxFileSize;
   private volatile long storeSize = 0L;
-  private final Integer flushLock = new Integer(0);
+  private final Object flushLock = new Object();
   final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
   final byte [] storeName;
   private final String storeNameStr;

@@ -126,7 +126,7 @@ public class Store implements HConstants {
   private volatile long maxSeqId = -1;

   private final Path compactionDir;
-  private final Integer compactLock = new Integer(0);
+  private final Object compactLock = new Object();
   private final int compactionThreshold;
   private final int blocksize;
   private final boolean bloomfilter;

@@ -255,7 +255,7 @@ public class Store implements HConstants {
       // but this is probably not what we want long term. If we got here there
       // has been data-loss
       LOG.warn("Exception processing reconstruction log " + reconstructionLog +
-        " opening " + this.storeName +
+        " opening " + Bytes.toString(this.storeName) +
         " -- continuing. Probably lack-of-HADOOP-1700 causing DATA LOSS!", e);
     } catch (IOException e) {
       // Presume we got here because of some HDFS issue. Don't just keep going.

@@ -263,7 +263,7 @@ public class Store implements HConstants {
       // again until human intervention but alternative has us skipping logs
       // and losing edits: HBASE-642.
       LOG.warn("Exception processing reconstruction log " + reconstructionLog +
-        " opening " + this.storeName, e);
+        " opening " + Bytes.toString(this.storeName), e);
       throw e;
     }
   }

@@ -1799,7 +1799,7 @@ public class Store implements HConstants {
   /*
    * Datastructure that holds size and row to split a file around.
    */
-  class StoreSize {
+  static class StoreSize {
     private final long size;
     private final byte[] key;
     StoreSize(long size, byte[] key) {

@@ -158,7 +158,7 @@ public class StoreFile implements HConstants {
    * @return Calculated path to parent region file.
    * @throws IOException
    */
-  static Path getReferredToFile(final Path p) throws IOException {
+  static Path getReferredToFile(final Path p) {
     Matcher m = REF_NAME_PARSER.matcher(p.getName());
     if (m == null || !m.matches()) {
       LOG.warn("Failed match of store file name " + p.toString());

@@ -252,6 +252,7 @@ public class StoreFile implements HConstants {
       super(fs, path, cache);
     }

+    @Override
     protected String toStringFirstKey() {
       String result = "";
       try {

@@ -262,6 +263,7 @@ public class StoreFile implements HConstants {
       return result;
     }

+    @Override
     protected String toStringLastKey() {
       String result = "";
       try {

@@ -287,6 +289,7 @@ public class StoreFile implements HConstants {
       return super.toString() + (isTop()? ", half=top": ", half=bottom");
     }

+    @Override
     protected String toStringFirstKey() {
       String result = "";
       try {

@@ -297,6 +300,7 @@ public class StoreFile implements HConstants {
       return result;
     }

+    @Override
     protected String toStringLastKey() {
       String result = "";
       try {

@@ -320,7 +324,6 @@ public class StoreFile implements HConstants {

   /**
    * @throws IOException
-   * @see #open()
    */
   public synchronized void close() throws IOException {
     if (this.reader != null) {

@@ -329,6 +332,7 @@ public class StoreFile implements HConstants {
     }
   }

+  @Override
   public String toString() {
     return this.path.toString() +
       (isReference()? "-" + this.referencePath + "-" + reference.toString(): "");
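Note on the @Override additions in StoreFile: the annotation has no runtime effect, but it makes the compiler verify that the method really overrides a supertype method, so a typo in the name or a mismatched signature becomes a compile error instead of a silently unused method, and it clears the corresponding Eclipse warning. For example:

    public class HalfKeyPrinter {
      @Override
      public String toString() {       // checked: really overrides Object.toString()
        return "half=top";
      }

      // @Override
      // public String tostring() { return "oops"; }
      // With the annotation present, the lowercase typo above would not compile.
    }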
@@ -388,6 +392,7 @@ public class StoreFile implements HConstants {
    * @param blocksize
    * @param algorithm Pass null to get default.
    * @param c Pass null to get default.
+   * @param bloomfilter
    * @return HFile.Writer
    * @throws IOException
    */

@@ -191,7 +191,7 @@ implements ChangedReadersObserver {
   }

   // Data stucture to hold next, viable row (and timestamp).
-  class ViableRow {
+  static class ViableRow {
     private final byte [] row;
     private final long ts;

@@ -283,7 +283,7 @@ class StoreScanner implements InternalScanner, ChangedReadersObserver {
         try {
           scanners[i].close();
         } catch (IOException e) {
-          LOG.warn(store.storeName + " failed closing scanner " + i, e);
+          LOG.warn(Bytes.toString(store.storeName) + " failed closing scanner " + i, e);
         }
       } finally {
         scanners[i] = null;

@@ -30,8 +30,6 @@ import org.apache.hadoop.metrics.MetricsUtil;
 import org.apache.hadoop.metrics.Updater;
 import org.apache.hadoop.metrics.jvm.JvmMetrics;
 import org.apache.hadoop.metrics.util.MetricsIntValue;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
-

 /**
  * This class is for maintaining the various regionserver statistics

@@ -96,6 +94,7 @@ public class RegionServerMetrics implements Updater {
   /**
    * Since this object is a registered updater, this method will be called
    * periodically, e.g. every 5 seconds.
+   * @param unused
    */
   public void doUpdates(MetricsContext unused) {
     synchronized (this) {

@@ -67,9 +67,9 @@ class TransactionState {
    * Simple container of the range of the scanners we've opened. Used to check
    * for conflicting writes.
    */
-  private class ScanRange {
-    private byte[] startRow;
-    private byte[] endRow;
+  private static class ScanRange {
+    protected byte[] startRow;
+    protected byte[] endRow;

     public ScanRange(byte[] startRow, byte[] endRow) {
       this.startRow = startRow;

@@ -80,7 +80,7 @@ class TransactionState {
    * Check if this scan range contains the given key.
    *
    * @param rowKey
-   * @return
+   * @return boolean
    */
   public boolean contains(byte[] rowKey) {
     if (startRow != null && Bytes.compareTo(rowKey, startRow) < 0) {

@@ -260,7 +260,7 @@ class TransactionState {
   /**
    * Set the startSequenceNumber.
    *
-   * @param startSequenceNumber.
+   * @param startSequenceNumber
    */
   void setStartSequenceNumber(final int startSequenceNumber) {
     this.startSequenceNumber = startSequenceNumber;

@@ -86,7 +86,7 @@ public class TransactionalRegion extends HRegion {
   static final Log LOG = LogFactory.getLog(TransactionalRegion.class);

   // Collection of active transactions (PENDING) keyed by id.
-  private Map<String, TransactionState> transactionsById = new HashMap<String, TransactionState>();
+  protected Map<String, TransactionState> transactionsById = new HashMap<String, TransactionState>();

   // Map of recent transactions that are COMMIT_PENDING or COMMITED keyed by
   // their sequence number

@@ -533,7 +533,7 @@ public class TransactionalRegion extends HRegion {
     transactionsById.remove(key);
   }

-  private TransactionState getTransactionState(final long transactionId)
+  protected TransactionState getTransactionState(final long transactionId)
       throws UnknownTransactionException {
     String key = String.valueOf(transactionId);
     TransactionState state = null;

@@ -622,8 +622,7 @@ public class TransactionalRegion extends HRegion {
   }

   // TODO, resolve from the global transaction log
-  @SuppressWarnings("unused")
-  private void resolveTransactionFromLog(final long transactionId) {
+  protected void resolveTransactionFromLog() {
     throw new RuntimeException("Globaql transaction log is not Implemented");
   }

@@ -653,7 +652,7 @@ public class TransactionalRegion extends HRegion {
         LOG.info("Transaction " + s.getTransactionId()
           + " expired in COMMIT_PENDING state");
         LOG.info("Checking transaction status in transaction log");
-        resolveTransactionFromLog(s.getTransactionId());
+        resolveTransactionFromLog();
         break;
       default:
         LOG.warn("Unexpected status on expired lease");

@@ -21,8 +21,6 @@ package org.apache.hadoop.hbase.rest;

 import java.util.Map;

-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.client.HBaseAdmin;

@@ -31,8 +29,6 @@ import org.apache.hadoop.hbase.rest.parser.IHBaseRestParser;
 import org.apache.hadoop.hbase.util.Bytes;

 public abstract class AbstractController implements RESTConstants {

-  private Log LOG = LogFactory.getLog(AbstractController.class);
   protected Configuration conf;
   protected AbstractModel model;

@@ -49,7 +49,7 @@ public abstract class AbstractModel {
   public static Encoding EBase64 = new Encoding() {

     public String encode(byte[] b) throws HBaseRestException {
-      return new String(Base64.encodeBytes(b));
+      return Base64.encodeBytes(b);
     }
   };
   public static Encoding EUTF8 = new Encoding() {

@@ -60,7 +60,7 @@ public abstract class AbstractModel {
   };
   }

-  protected static Encodings.Encoding encoding = Encodings.EUTF8;
+  protected static final Encodings.Encoding encoding = Encodings.EUTF8;

   public void initialize(HBaseConfiguration conf, HBaseAdmin admin) {
     this.conf = conf;

@@ -85,7 +85,7 @@ public abstract class AbstractModel {
     }
   }

-  protected static byte COLON = Bytes.toBytes(":")[0];
+  protected static final byte COLON = Bytes.toBytes(":")[0];

   protected boolean isColumnFamily(byte[] columnName) {
     for (int i = 0; i < columnName.length; i++) {
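Note on the final additions in the REST models (encoding and COLON above; scannerMap, nextScannerId and scannerMaster below): a mutable protected static field is effectively a global that any subclass or caller in the package can reassign; adding final fixes the reference at class-initialization time and lets the field read as the constant it is meant to be. A trivial sketch with illustrative names and assumed semantics:

    public class ModelConstants {
      // Without final, nothing would stop ModelConstants.COLON = 0; elsewhere.
      protected static final byte COLON = ":".getBytes()[0];

      static boolean isFamilyOnly(byte[] column) {
        // Illustrative use of the constant: a column name ending in the
        // family/qualifier separator has no qualifier part.
        return column.length > 0 && column[column.length - 1] == COLON;
      }

      public static void main(String[] args) {
        System.out.println(isFamilyOnly("info:".getBytes()));    // true
        System.out.println(isFamilyOnly("info:age".getBytes())); // false
      }
    }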
@@ -38,8 +38,7 @@ public class DatabaseController extends AbstractController {
   }

   @Override
-  protected AbstractModel generateModel(
-      @SuppressWarnings("hiding") HBaseConfiguration conf,
+  protected AbstractModel generateModel(HBaseConfiguration conf,
       HBaseAdmin admin) {
     return new DatabaseModel(conf, admin);
   }

@@ -21,9 +21,7 @@ package org.apache.hadoop.hbase.rest;

 import java.io.BufferedReader;
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.List;
 import java.util.Map;

 import javax.servlet.ServletException;

@@ -42,7 +40,6 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.InfoServer;
 import org.mortbay.http.NCSARequestLog;
 import org.mortbay.http.SocketListener;
-import org.mortbay.jetty.servlet.WebApplicationContext;

 /**
  * Servlet implementation class for hbase REST interface. Presumes container

@@ -389,8 +386,7 @@ public class Dispatcher extends javax.servlet.http.HttpServlet {
     }
   }

-  protected IHBaseRestParser getParser(HttpServletRequest request)
-      throws HBaseRestException {
+  protected IHBaseRestParser getParser(HttpServletRequest request) {
     return HBaseRestParserFactory.getParser(ContentType.getContentType(request
       .getHeader("content-type")));
   }

@@ -478,8 +474,7 @@ public class Dispatcher extends javax.servlet.http.HttpServlet {
     NCSARequestLog ncsa = new NCSARequestLog();
     ncsa.setLogLatency(true);
     webServer.setRequestLog(ncsa);
-    WebApplicationContext context = webServer.addWebApplication("/", InfoServer
-      .getWebAppDir("rest"));
+    webServer.addWebApplication("/", InfoServer.getWebAppDir("rest"));
     webServer.start();
   }

@@ -77,7 +77,7 @@ public interface RESTConstants {

   static class FactoryMap {

-    protected static boolean created = false;
+    static boolean created = false;
     protected HashMap<String, FilterFactory> map = new HashMap<String, FilterFactory>();

     protected FactoryMap() {

@@ -89,9 +89,8 @@ public interface RESTConstants {
         FactoryMap f = new FactoryMap();
         f.initialize();
         return f;
-      } else {
-        return null;
       }
+      return null;
     }

     public FilterFactory get(String c) {

@@ -42,8 +42,8 @@ public class RowController extends AbstractController {
   }

   @Override
-  protected AbstractModel generateModel(
-    @SuppressWarnings("hiding") HBaseConfiguration conf, HBaseAdmin admin) {
+  protected AbstractModel generateModel(HBaseConfiguration conf,
+    HBaseAdmin admin) {
     return new RowModel(conf, admin);
   }

@@ -25,8 +25,6 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicInteger;

-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.HBaseAdmin;

@@ -42,9 +40,6 @@ import org.apache.hadoop.hbase.rest.exception.HBaseRestException;
  */
 public class ScannerModel extends AbstractModel {

-  @SuppressWarnings("unused")
-  private Log LOG = LogFactory.getLog(TableModel.class);
-
   public ScannerModel(HBaseConfiguration config, HBaseAdmin admin) {
     super.initialize(config, admin);
   }

@@ -54,11 +49,11 @@ public class ScannerModel extends AbstractModel {
   //
   protected static class ScannerMaster {

-    protected static Map<Integer, Scanner> scannerMap = new ConcurrentHashMap<Integer, Scanner>();
-    protected static AtomicInteger nextScannerId = new AtomicInteger(1);
+    protected static final Map<Integer, Scanner> scannerMap = new ConcurrentHashMap<Integer, Scanner>();
+    protected static final AtomicInteger nextScannerId = new AtomicInteger(1);

     public Integer addScanner(Scanner scanner) {
-      Integer i = new Integer(nextScannerId.getAndIncrement());
+      Integer i = Integer.valueOf(nextScannerId.getAndIncrement());
       scannerMap.put(i, scanner);
       return i;
     }

@@ -81,7 +76,7 @@ public class ScannerModel extends AbstractModel {
     }
   }

-  protected static ScannerMaster scannerMaster = new ScannerMaster();
+  protected static final ScannerMaster scannerMaster = new ScannerMaster();

   /**
   * returns the next numResults RowResults from the Scaner mapped to Integer

@@ -43,7 +43,7 @@ public class TableController extends AbstractController {

   @Override
   protected AbstractModel generateModel(
-      @SuppressWarnings("hiding") HBaseConfiguration conf, HBaseAdmin admin) {
+      HBaseConfiguration conf, HBaseAdmin admin) {
     return new TableModel(conf, admin);
   }

@@ -143,11 +143,10 @@ public class TableModel extends AbstractModel {

   /**
    * Get table metadata.
    *
-   * @param request
-   * @param response
    *
    * @param tableName
    * @throws IOException
+   * @return HTableDescriptor
    * @throws HBaseRestException
    */
   public HTableDescriptor getTableMetadata(final String tableName)
     throws HBaseRestException {

@@ -173,9 +172,9 @@ public class TableModel extends AbstractModel {

   /**
    * Return region offsets.
    *
-   * @param request
-   * @param response
+   * @param tableName
+   * @return Regions
    * @throws HBaseRestException
    */
   public Regions getTableRegions(final String tableName)
     throws HBaseRestException {

@@ -239,7 +238,7 @@ public class TableModel extends AbstractModel {
     }
   }

-  public class Regions implements ISerializable {
+  public static class Regions implements ISerializable {
     byte[][] regionKey;

     public Regions(byte[][] bs) {

@@ -42,7 +42,7 @@ public class TimestampController extends AbstractController {

   @Override
   protected AbstractModel generateModel(
-      @SuppressWarnings("hiding") HBaseConfiguration conf, HBaseAdmin admin) {
+      HBaseConfiguration conf, HBaseAdmin admin) {
     return new TimestampModel(conf, admin);
   }

@@ -95,7 +95,7 @@ public class TimestampModel extends AbstractModel {
    * @param tableName
    * @param rowName
    * @param timestamp
-   * @return
+   * @return RowResult
    * @throws HBaseRestException
    */
   public RowResult get(byte[] tableName, byte[] rowName, long timestamp)

@@ -41,7 +41,8 @@ public class RestCell extends Cell {
   }

   /**
    *
+   * @param name
    * @param cell
    */
   public RestCell(byte[] name, Cell cell) {
     super(cell.getValue(), cell.getTimestamp());

@@ -29,6 +29,7 @@ public interface FilterFactoryConstants {
   static String VALUE = "value";

   static class MalformedFilterException extends HBaseRestException {
+    private static final long serialVersionUID = 1L;

     public MalformedFilterException() {
     }

@@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.filter.RowFilterInterface;
 import org.apache.hadoop.hbase.filter.RowFilterSet;
 import org.apache.hadoop.hbase.rest.RESTConstants;
 import org.apache.hadoop.hbase.rest.exception.HBaseRestException;
-import org.apache.hadoop.hbase.rest.filter.FilterFactoryConstants.MalformedFilterException;
 import org.json.JSONArray;
 import org.json.JSONException;
 import org.json.JSONObject;

@@ -35,7 +35,7 @@ public interface IHBaseRestParser {
    * Parses a HTableDescriptor given the input array.
    *
    * @param input
-   * @return
+   * @return HTableDescriptor
    * @throws HBaseRestException
    */
   public HTableDescriptor getTableDescriptor(byte[] input)

@@ -23,7 +23,6 @@ import java.util.ArrayList;

 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.rest.RESTConstants;
 import org.apache.hadoop.hbase.rest.descriptors.RowUpdateDescriptor;
 import org.apache.hadoop.hbase.rest.descriptors.ScannerDescriptor;
Some files were not shown because too many files have changed in this diff.