diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java
index f78dfb199c1..bbafc650b29 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java
@@ -103,8 +103,8 @@ class CatalogReplicaLoadBalanceSimpleSelector implements
}
}
- private final ConcurrentMapbytes
- * @throws org.apache.hadoop.hbase.exceptions.DeserializationException
* @see #toByteArray
*/
public static ColumnCountGetFilter parseFrom(final byte [] pbBytes)
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
index 5151d79023d..39fe4f9b759 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
@@ -239,8 +239,8 @@ public class FuzzyRowFilter extends FilterBase {
byte[] nextRow() {
if (nextRows.isEmpty()) {
- throw new IllegalStateException(
- "NextRows should not be empty, make sure to call nextRow() after updateTracker() return true");
+ throw new IllegalStateException("NextRows should not be empty, "
+ + "make sure to call nextRow() after updateTracker() return true");
} else {
return nextRows.peek().getFirst();
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
index b4303cdb76d..f314bede082 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
@@ -399,7 +399,6 @@ public class SingleColumnValueFilter extends FilterBase {
/**
* @param pbBytes A pb serialized {@link SingleColumnValueFilter} instance
* @return An instance of {@link SingleColumnValueFilter} made from bytes
- * @throws org.apache.hadoop.hbase.exceptions.DeserializationException
* @see #toByteArray
*/
public static SingleColumnValueFilter parseFrom(final byte [] pbBytes)
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 25a2f10c3a1..8014525b847 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -2952,9 +2952,7 @@ public final class ProtobufUtil {
}
/**
- * Creates {@link CompactionState} from
- * {@link org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState}
- * state
+ * Creates {@link CompactionState} from {@link GetRegionInfoResponse.CompactionState} state
* @param state the protobuf CompactionState
* @return CompactionState
*/
@@ -2967,9 +2965,7 @@ public final class ProtobufUtil {
}
/**
- * Creates {@link CompactionState} from
- * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos
- * .RegionLoad.CompactionState} state
+ * Creates {@link CompactionState} from {@link RegionLoad.CompactionState} state
* @param state the protobuf CompactionState
* @return CompactionState
*/
@@ -2989,8 +2985,7 @@ public final class ProtobufUtil {
}
/**
- * Creates {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type}
- * from {@link SnapshotType}
+ * Creates {@link SnapshotProtos.SnapshotDescription.Type} from {@link SnapshotType}
* @param type the SnapshotDescription type
* @return the protobuf SnapshotDescription type
*/
@@ -3000,8 +2995,8 @@ public final class ProtobufUtil {
}
/**
- * Creates {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type}
- * from the type of SnapshotDescription string
+ * Creates {@link SnapshotProtos.SnapshotDescription.Type} from the type of SnapshotDescription
+ * string
* @param snapshotDesc string representing the snapshot description type
* @return the protobuf SnapshotDescription type
*/
@@ -3011,18 +3006,16 @@ public final class ProtobufUtil {
}
/**
- * Creates {@link SnapshotType} from the type of
- * {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription}
- * @param type the snapshot description type
- * @return the protobuf SnapshotDescription type
+ * Creates {@link SnapshotType} from the {@link SnapshotProtos.SnapshotDescription.Type}
+ * @param type the snapshot description type
+ * @return the protobuf SnapshotDescription type
*/
public static SnapshotType createSnapshotType(SnapshotProtos.SnapshotDescription.Type type) {
return SnapshotType.valueOf(type.toString());
}
/**
- * Convert from {@link SnapshotDescription} to
- * {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription}
+ * Convert from {@link SnapshotDescription} to {@link SnapshotProtos.SnapshotDescription}
* @param snapshotDesc the POJO SnapshotDescription
* @return the protobuf SnapshotDescription
*/
@@ -3056,9 +3049,7 @@ public final class ProtobufUtil {
}
/**
- * Convert from
- * {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription} to
- * {@link SnapshotDescription}
+ * Convert from {@link SnapshotProtos.SnapshotDescription} to {@link SnapshotDescription}
* @param snapshotDesc the protobuf SnapshotDescription
* @return the POJO SnapshotDescription
*/
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
index a6be2378560..7b995b0b8cd 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
@@ -38,9 +38,9 @@ import org.apache.yetus.audience.InterfaceStability;
* {@link BBKVComparator}. Use it when mostly {@link ByteBufferKeyValue}s.
*
<4 bytes keylength> <4 bytes valuelength> <2 bytes rowlength>
* <row> <1 byte columnfamilylength> <columnfamily> <columnqualifier>
* <8 bytes timestamp> <1 byte keytype> <value> <2 bytes tagslength>
* <tags>
+ * @param withTags Whether to write tags.
+ * @return Bytes count required to serialize this Cell in a {@link KeyValue} format.
*/
// TODO remove the boolean param once HBASE-16706 is done.
default int getSerializedSize(boolean withTags) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index efa442bd254..a10e91929f6 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -2371,21 +2371,23 @@ public class KeyValue implements ExtendedCell, Cloneable {
/**
* HeapSize implementation
- *
+ *
* We do not count the bytes in the rowCache because it should be empty for a KeyValue in the
* MemStore.
*/
@Override
public long heapSize() {
- /*
- * Deep object overhead for this KV consists of two parts. The first part is the KV object
- * itself, while the second part is the backing byte[]. We will only count the array overhead
- * from the byte[] only if this is the first KV in there.
- */
- return ClassSize.align(FIXED_OVERHEAD) +
- (offset == 0
- ? ClassSize.sizeOfByteArray(length) // count both length and object overhead
- : length); // only count the number of bytes
+ // Deep object overhead for this KV consists of two parts. The first part is the KV object
+ // itself, while the second part is the backing byte[]. We will only count the array overhead
+ // from the byte[] only if this is the first KV in there.
+ int fixed = ClassSize.align(FIXED_OVERHEAD);
+ if (offset == 0) {
+ // count both length and object overhead
+ return fixed + ClassSize.sizeOfByteArray(length);
+ } else {
+ // only count the number of bytes
+ return fixed + length;
+ }
}
/**
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
index af61256cad0..54b95e3aa53 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
@@ -1589,11 +1589,13 @@ public final class PrivateCellUtil {
}
private static class FirstOnRowCell extends EmptyCell {
+ // @formatter:off
private static final int FIXED_HEAPSIZE =
ClassSize.OBJECT // object
+ ClassSize.REFERENCE // row array
+ Bytes.SIZEOF_INT // row offset
+ Bytes.SIZEOF_SHORT; // row length
+ // @formatter:on
private final byte[] rowArray;
private final int roffset;
private final short rlength;
@@ -1643,11 +1645,13 @@ public final class PrivateCellUtil {
}
private static class FirstOnRowByteBufferExtendedCell extends EmptyByteBufferExtendedCell {
+ // @formatter:off
private static final int FIXED_OVERHEAD =
ClassSize.OBJECT // object
+ ClassSize.REFERENCE // row buffer
+ Bytes.SIZEOF_INT // row offset
+ Bytes.SIZEOF_SHORT; // row length
+ // @formatter:on
private final ByteBuffer rowBuff;
private final int roffset;
private final short rlength;
@@ -1698,11 +1702,12 @@ public final class PrivateCellUtil {
}
private static class LastOnRowByteBufferExtendedCell extends EmptyByteBufferExtendedCell {
- private static final int FIXED_OVERHEAD =
- ClassSize.OBJECT // object
+ // @formatter:off
+ private static final int FIXED_OVERHEAD = ClassSize.OBJECT // object
+ ClassSize.REFERENCE // rowBuff
+ Bytes.SIZEOF_INT // roffset
+ Bytes.SIZEOF_SHORT; // rlength
+ // @formatter:on
private final ByteBuffer rowBuff;
private final int roffset;
private final short rlength;
@@ -1754,11 +1759,12 @@ public final class PrivateCellUtil {
private static class FirstOnRowColByteBufferExtendedCell
extends FirstOnRowByteBufferExtendedCell {
- private static final int FIXED_OVERHEAD =
- FirstOnRowByteBufferExtendedCell.FIXED_OVERHEAD
- + ClassSize.REFERENCE * 2 // family buffer and column buffer
- + Bytes.SIZEOF_INT * 3 // famOffset, colOffset, colLength
- + Bytes.SIZEOF_BYTE; // famLength
+ // @formatter:off
+ private static final int FIXED_OVERHEAD = FirstOnRowByteBufferExtendedCell.FIXED_OVERHEAD
+ + ClassSize.REFERENCE * 2 // family buffer and column buffer
+ + Bytes.SIZEOF_INT * 3 // famOffset, colOffset, colLength
+ + Bytes.SIZEOF_BYTE; // famLength
+ // @formatter:on
private final ByteBuffer famBuff;
private final int famOffset;
private final byte famLength;
@@ -1823,11 +1829,12 @@ public final class PrivateCellUtil {
}
private static class FirstOnRowColCell extends FirstOnRowCell {
- private static final long FIXED_HEAPSIZE =
- FirstOnRowCell.FIXED_HEAPSIZE
+ // @formatter:off
+ private static final long FIXED_HEAPSIZE = FirstOnRowCell.FIXED_HEAPSIZE
+ Bytes.SIZEOF_BYTE // flength
+ Bytes.SIZEOF_INT * 3 // foffset, qoffset, qlength
+ ClassSize.REFERENCE * 2; // fArray, qArray
+ // @formatter:on
private final byte[] fArray;
private final int foffset;
private final byte flength;
@@ -1886,10 +1893,11 @@ public final class PrivateCellUtil {
}
private static class FirstOnRowColTSCell extends FirstOnRowColCell {
- private static final long FIXED_HEAPSIZE =
- FirstOnRowColCell.FIXED_HEAPSIZE
- + Bytes.SIZEOF_LONG; // ts
+ // @formatter:off
+ private static final long FIXED_HEAPSIZE = FirstOnRowColCell.FIXED_HEAPSIZE
+ + Bytes.SIZEOF_LONG; // ts
private long ts;
+ // @formatter:on
public FirstOnRowColTSCell(byte[] rArray, int roffset, short rlength, byte[] fArray,
int foffset, byte flength, byte[] qArray, int qoffset, int qlength, long ts) {
@@ -1910,10 +1918,11 @@ public final class PrivateCellUtil {
private static class FirstOnRowColTSByteBufferExtendedCell
extends FirstOnRowColByteBufferExtendedCell {
- private static final int FIXED_OVERHEAD =
- FirstOnRowColByteBufferExtendedCell.FIXED_OVERHEAD
- + Bytes.SIZEOF_LONG; // ts
+ // @formatter:off
+ private static final int FIXED_OVERHEAD = FirstOnRowColByteBufferExtendedCell.FIXED_OVERHEAD
+ + Bytes.SIZEOF_LONG; // ts
private long ts;
+ // @formatter:on
public FirstOnRowColTSByteBufferExtendedCell(ByteBuffer rBuffer, int roffset, short rlength,
ByteBuffer fBuffer, int foffset, byte flength, ByteBuffer qBuffer, int qoffset, int qlength,
@@ -1934,11 +1943,12 @@ public final class PrivateCellUtil {
}
private static class LastOnRowCell extends EmptyCell {
- private static final int FIXED_OVERHEAD =
- ClassSize.OBJECT // object
+ // @formatter:off
+ private static final int FIXED_OVERHEAD = ClassSize.OBJECT // object
+ ClassSize.REFERENCE // row array
+ Bytes.SIZEOF_INT // row offset
+ Bytes.SIZEOF_SHORT; // row length
+ // @formatter:on
private final byte[] rowArray;
private final int roffset;
private final short rlength;
@@ -1988,10 +1998,12 @@ public final class PrivateCellUtil {
}
private static class LastOnRowColCell extends LastOnRowCell {
+ // @formatter:off
private static final long FIXED_OVERHEAD = LastOnRowCell.FIXED_OVERHEAD
- + ClassSize.REFERENCE * 2 // fArray and qArray
- + Bytes.SIZEOF_INT * 3 // foffset, qoffset, qlength
- + Bytes.SIZEOF_BYTE; // flength
+ + ClassSize.REFERENCE * 2 // fArray and qArray
+ + Bytes.SIZEOF_INT * 3 // foffset, qoffset, qlength
+ + Bytes.SIZEOF_BYTE; // flength
+ // @formatter:on
private final byte[] fArray;
private final int foffset;
private final byte flength;
@@ -2050,11 +2062,12 @@ public final class PrivateCellUtil {
}
private static class LastOnRowColByteBufferExtendedCell extends LastOnRowByteBufferExtendedCell {
- private static final int FIXED_OVERHEAD =
- LastOnRowByteBufferExtendedCell.FIXED_OVERHEAD
- + ClassSize.REFERENCE * 2 // fBuffer and qBuffer
- + Bytes.SIZEOF_INT * 3 // foffset, qoffset, qlength
- + Bytes.SIZEOF_BYTE; // flength
+ // @formatter:off
+ private static final int FIXED_OVERHEAD = LastOnRowByteBufferExtendedCell.FIXED_OVERHEAD
+ + ClassSize.REFERENCE * 2 // fBuffer and qBuffer
+ + Bytes.SIZEOF_INT * 3 // foffset, qoffset, qlength
+ + Bytes.SIZEOF_BYTE; // flength
+ // @formatter:on
private final ByteBuffer fBuffer;
private final int foffset;
private final byte flength;
@@ -2119,11 +2132,12 @@ public final class PrivateCellUtil {
}
private static class FirstOnRowDeleteFamilyCell extends EmptyCell {
- private static final int FIXED_OVERHEAD =
- ClassSize.OBJECT // object
+ // @formatter:off
+ private static final int FIXED_OVERHEAD = ClassSize.OBJECT // object
+ ClassSize.REFERENCE * 2 // fBuffer and qBuffer
+ Bytes.SIZEOF_INT * 3 // foffset, qoffset, qlength
+ Bytes.SIZEOF_BYTE; // flength
+ // @formatter:on
private final byte[] row;
private final byte[] fam;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java
index f1589ba093b..c8b84574645 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java
@@ -164,17 +164,22 @@ public class CoprocessorClassLoader extends ClassLoaderBase {
}
FileSystem fs = pathPattern.getFileSystem(conf);
- Path pathPattern1 = fs.isDirectory(pathPattern) ?
- new Path(pathPattern, "*.jar") : pathPattern; // append "*.jar" if a directory is specified
- FileStatus[] fileStatuses = fs.globStatus(pathPattern1); // return all files that match the pattern
- if (fileStatuses == null || fileStatuses.length == 0) { // if no one matches
+ // append "*.jar" if a directory is specified
+ Path pathPattern1 = fs.isDirectory(pathPattern) ? new Path(pathPattern, "*.jar") : pathPattern;
+ // return all files that match the pattern
+ FileStatus[] fileStatuses = fs.globStatus(pathPattern1);
+ if (fileStatuses == null || fileStatuses.length == 0) {
+ // if no one matches
throw new FileNotFoundException(pathPattern1.toString());
} else {
boolean validFileEncountered = false;
- for (Path path : FileUtil.stat2Paths(fileStatuses)) { // for each file that match the pattern
- if (fs.isFile(path)) { // only process files, skip for directories
- File dst = new File(parentDirStr, "." + pathPrefix + "."
- + path.getName() + "." + System.currentTimeMillis() + ".jar");
+ // for each file that match the pattern
+ for (Path path : FileUtil.stat2Paths(fileStatuses)) {
+ if (fs.isFile(path)) {
+ // only process files, skip for directories
+ File dst = new File(parentDirStr,
+ "." + pathPrefix + "." + path.getName() + "." + System.currentTimeMillis()
+ + ".jar");
fs.copyToLocalFile(path, new Path(dst.toString()));
dst.deleteOnExit();
@@ -182,7 +187,8 @@ public class CoprocessorClassLoader extends ClassLoaderBase {
JarFile jarFile = new JarFile(dst.toString());
try {
- Enumeration{@code * Job job = new Job(conf);
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java
index ef5c161d4cd..76254c3f6ba 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java
@@ -78,8 +78,7 @@ public class MultiTableSnapshotInputFormatImpl {
/**
* Return the list of splits extracted from the scans/snapshots pushed to conf by
- * {@link
- * #setInput(org.apache.hadoop.conf.Configuration, java.util.Map, org.apache.hadoop.fs.Path)}
+ * {@link #setInput(Configuration, Map, Path)}
*
* @param conf Configuration to determine splits from
* @return Return the list of splits extracted from the scans/snapshots pushed to conf
@@ -115,7 +114,7 @@ public class MultiTableSnapshotInputFormatImpl {
/**
* Retrieve the snapshot name -> list<scan> mapping pushed to configuration by
- * {@link #setSnapshotToScans(org.apache.hadoop.conf.Configuration, java.util.Map)}
+ * {@link #setSnapshotToScans(Configuration, Map)}
*
* @param conf Configuration to extract name -> list<scan> mappings from.
* @return the snapshot name -> list<scan> mapping pushed to configuration
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java
index ca82e2a58ee..4ad1935f37a 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java
@@ -231,8 +231,9 @@ public class MultithreadedTableMapper extends TableMapper {
}
}
- @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION",
- justification="Don't understand why FB is complaining about this one. We do throw exception")
+ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "REC_CATCH_EXCEPTION",
+ justification = "Don't understand why FB is complaining about this one."
+ + " We do throw exception")
private class MapRunner implements Runnable {
private Mapper mapper;
private Context subcontext;
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MobRefReporter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MobRefReporter.java
index 4152182a6d3..3b9a13879e8 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MobRefReporter.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MobRefReporter.java
@@ -65,10 +65,10 @@ import org.slf4j.LoggerFactory;
/**
- * Scans a given table + CF for all mob reference cells to get the list of backing mob files.
- * For each referenced file we attempt to verify that said file is on the FileSystem in a place
- * that the MOB system will look when attempting to resolve the actual value.
- *
+ * Scans a given table + CF for all mob reference cells to get the list of backing mob files. For
+ * each referenced file we attempt to verify that said file is on the FileSystem in a place that the
+ * MOB system will look when attempting to resolve the actual value.
+ *
* The job includes counters that can help provide a rough sketch of the mob data.
* * @@ -94,31 +94,31 @@ import org.slf4j.LoggerFactory; * Number of rows with total size in the 100,000s of bytes=6838 * Number of rows with total size in the 1,000,000s of bytes=3162 *- * - * * Map-Reduce Framework:Map input records - the number of rows with mob references - * * Map-Reduce Framework:Reduce output records - the number of unique hfiles referenced - * * MOB:NUM_CELLS - the total number of mob reference cells - * * PROBLEM:Affected rows - the number of rows that reference hfiles with an issue - * * PROBLEM:Problem MOB files - the number of unique hfiles that have an issue - * * CELLS PER ROW: - this counter group gives a histogram of the order of magnitude of the - * number of cells in a given row by grouping by the number of digits used in each count. - * This allows us to see more about the distribution of cells than what we can determine - * with just the cell count and the row count. In this particular example we can see that - * all of our rows have somewhere between 1 - 9 cells. - * * ROWS WITH PROBLEMS PER FILE: - this counter group gives a histogram of the order of - * magnitude of the number of rows in each of the hfiles with a problem. e.g. in the - * example there are 2 hfiles and they each have the same order of magnitude number of rows, - * specifically between 100 and 999. - * * SIZES OF CELLS: - this counter group gives a histogram of the order of magnitude of - * the size of mob values according to our reference cells. e.g. in the example above we - * have cell sizes that are all between 10,000 bytes and 9,999,999 bytes. From this - * histogram we can also see that _most_ cells are 100,000 - 999,000 bytes and the smaller - * and bigger ones are outliers making up less than 2% of mob cells. - * * SIZES OF ROWS: - this counter group gives a histogram of the order of magnitude of the - * size of mob values across each row according to our reference cells. In the example above - * we have rows that are are between 100,000 bytes and 9,999,999 bytes. We can also see that - * about 2/3rd of our rows are 100,000 - 999,999 bytes. - * + *+ *
* Generates a report that gives one file status per line, with tabs dividing fields. * *- Map-Reduce Framework:Map input records - the number of rows with mob references
+ *- Map-Reduce Framework:Reduce output records - the number of unique hfiles referenced
+ *- MOB:NUM_CELLS - the total number of mob reference cells
+ *- PROBLEM:Affected rows - the number of rows that reference hfiles with an issue
+ *- PROBLEM:Problem MOB files - the number of unique hfiles that have an issue
+ *- CELLS PER ROW: - this counter group gives a histogram of the order of magnitude of the number + * of cells in a given row by grouping by the number of digits used in each count. This allows us to + * see more about the distribution of cells than what we can determine with just the cell count and + * the row count. In this particular example we can see that all of our rows have somewhere between + * 1 - 9 cells.
+ *- ROWS WITH PROBLEMS PER FILE: - this counter group gives a histogram of the order of magnitude + * of the number of rows in each of the hfiles with a problem. e.g. in the example there are 2 + * hfiles and they each have the same order of magnitude number of rows, specifically between 100 + * and 999.
+ *- SIZES OF CELLS: - this counter group gives a histogram of the order of magnitude of the size + * of mob values according to our reference cells. e.g. in the example above we have cell sizes that + * are all between 10,000 bytes and 9,999,999 bytes. From this histogram we can also see that _most_ + * cells are 100,000 - 999,000 bytes and the smaller and bigger ones are outliers making up less + * than 2% of mob cells.
+ *- SIZES OF ROWS: - this counter group gives a histogram of the order of magnitude of the size + * of mob values across each row according to our reference cells. In the example above we have rows + * that are are between 100,000 bytes and 9,999,999 bytes. We can also see that about 2/3rd of our + * rows are 100,000 - 999,999 bytes.
+ *@@ -133,32 +133,31 @@ import org.slf4j.LoggerFactory; ** * Possible results are listed; the first three indicate things are working properly. - * * MOB DIR - the reference is in the normal MOB area for the given table and CF - * * HLINK TO ARCHIVE FOR SAME TABLE - the reference is present in the archive area for this - * table and CF - * * HLINK TO ARCHIVE FOR OTHER TABLE - the reference is present in a different table and CF, - * either in the MOB or archive areas (e.g. from a snapshot restore or clone) - * * ARCHIVE WITH HLINK BUT NOT FROM OUR TABLE - the reference is currently present in the archive - * area for this table and CF, but it is kept there because a _different_ table has a - * reference to it (e.g. from a snapshot clone). If these other tables are removed then - * the file will likely be deleted unless there is a snapshot also referencing it. - * * ARCHIVE BUT NO HLINKS - the reference is currently present in the archive for this table and - * CF, but there are no references present to prevent its removal. Unless it is newer than - * the general TTL (default 5 minutes) or referenced in a snapshot it will be subject to - * cleaning. - * * ARCHIVE BUT FAILURE WHILE CHECKING HLINKS - Check the job logs to see why things failed while - * looking for why this file is being kept around. - * * MISSING FILE - We couldn't find the reference on the FileSystem. Either there is dataloss due - * to a bug in the MOB storage system or the MOB storage is damaged but in an edge case that - * allows it to work for now. You can verify which by doing a raw reference scan to get the - * referenced hfile and check the underlying filesystem. See the ref guide section on mob - * for details. - * * HLINK BUT POINT TO MISSING FILE - There is a pointer in our mob area for this table and CF - * to a file elsewhere on the FileSystem, however the file it points to no longer exists. - * * MISSING FILE BUT FAILURE WHILE CHECKING HLINKS - We could not find the referenced file, - * however you should check the job logs to see why we couldn't check to see if there is a - * pointer to the referenced file in our archive or another table's archive or mob area. - * + *+ *
*/ @InterfaceAudience.Private public class MobRefReporter extends Configured implements Tool { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index aaa49e607e7..f9f16b5e350 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -297,15 +297,16 @@ public class TestImportExport { IMPORT_TABLE, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); - /* exportedTableIn94Format contains 5 rows - ROW COLUMN+CELL - r1 column=f1:c1, timestamp=1383766761171, value=val1 - r2 column=f1:c1, timestamp=1383766771642, value=val2 - r3 column=f1:c1, timestamp=1383766777615, value=val3 - r4 column=f1:c1, timestamp=1383766785146, value=val4 - r5 column=f1:c1, timestamp=1383766791506, value=val5 - */ - assertEquals(5, UTIL.countRows(t)); + // @formatter:off + // exportedTableIn94Format contains 5 rows + // ROW COLUMN+CELL + // r1 column=f1:c1, timestamp=1383766761171, value=val1 + // r2 column=f1:c1, timestamp=1383766771642, value=val2 + // r3 column=f1:c1, timestamp=1383766777615, value=val3 + // r4 column=f1:c1, timestamp=1383766785146, value=val4 + // r5 column=f1:c1, timestamp=1383766791506, value=val5 + // @formatter:on + assertEquals(5, UTIL.countRows(t)); } } @@ -330,12 +331,9 @@ public class TestImportExport { p.addColumn(FAMILYA, QUAL, now + 3, QUAL); p.addColumn(FAMILYA, QUAL, now + 4, QUAL); t.put(p); - - String[] args = new String[] { - "-D" + ExportUtils.EXPORT_BATCHING + "=" + EXPORT_BATCH_SIZE, // added scanner batching arg. - name.getMethodName(), - FQ_OUTPUT_DIR - }; + // added scanner batching arg. + String[] args = new String[] { "-D" + ExportUtils.EXPORT_BATCHING + "=" + EXPORT_BATCH_SIZE, + name.getMethodName(), FQ_OUTPUT_DIR }; assertTrue(runExport(args)); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java index 99f8e3df447..808b77bc9d6 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java @@ -48,8 +48,9 @@ public class TestRowModel extends TestModelBase- MOB DIR - the reference is in the normal MOB area for the given table and CF
+ *- HLINK TO ARCHIVE FOR SAME TABLE - the reference is present in the archive area for this table + * and CF
+ *- HLINK TO ARCHIVE FOR OTHER TABLE - the reference is present in a different table and CF, + * either in the MOB or archive areas (e.g. from a snapshot restore or clone)
+ *- ARCHIVE WITH HLINK BUT NOT FROM OUR TABLE - the reference is currently present in the archive + * area for this table and CF, but it is kept there because a _different_ table has a reference to + * it (e.g. from a snapshot clone). If these other tables are removed then the file will likely be + * deleted unless there is a snapshot also referencing it.
+ *- ARCHIVE BUT NO HLINKS - the reference is currently present in the archive for this table and + * CF, but there are no references present to prevent its removal. Unless it is newer than the + * general TTL (default 5 minutes) or referenced in a snapshot it will be subject to cleaning.
+ *- ARCHIVE BUT FAILURE WHILE CHECKING HLINKS - Check the job logs to see why things failed while + * looking for why this file is being kept around.
+ *- MISSING FILE - We couldn't find the reference on the FileSystem. Either there is dataloss due + * to a bug in the MOB storage system or the MOB storage is damaged but in an edge case that allows + * it to work for now. You can verify which by doing a raw reference scan to get the referenced + * hfile and check the underlying filesystem. See the ref guide section on mob for details.
+ *- HLINK BUT POINT TO MISSING FILE - There is a pointer in our mob area for this table and CF to + * a file elsewhere on the FileSystem, however the file it points to no longer exists.
+ *- MISSING FILE BUT FAILURE WHILE CHECKING HLINKS - We could not find the referenced file, + * however you should check the job logs to see why we couldn't check to see if there is a pointer + * to the referenced file in our archive or another table's archive or mob area.
+ *{ public TestRowModel() throws Exception { super(RowModel.class); AS_XML = - " " + - "
"; + "" + "dGVzdHZhbHVlMQ== | " + + "
"; AS_JSON = "{\"key\":\"dGVzdHJvdzE=\",\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjE=\"," + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java index 85abc722044..142827be70c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java @@ -157,7 +157,6 @@ public class ForeignException extends IOException { * @param bytes * @return the ForeignExcpetion instance * @throws InvalidProtocolBufferException if there was deserialization problem this is thrown. - * @throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException */ public static ForeignException deserialize(byte[] bytes) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index ed0e84deace..d106cf72382 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -533,13 +533,13 @@ public final class HFile { /** * @param fs filesystem * @param path Path to file to read - * @param cacheConf This must not be null. @see - * {@link org.apache.hadoop.hbase.io.hfile.CacheConfig#CacheConfig(Configuration)} + * @param cacheConf This must not be null. * @param primaryReplicaReader true if this is a reader for primary replica * @param conf Configuration * @return an active Reader instance * @throws IOException Will throw a CorruptHFileException (DoNotRetryIOException subtype) if hfile * is corrupt/invalid. + * @see CacheConfig#CacheConfig(Configuration) */ public static Reader createReader(FileSystem fs, Path path, CacheConfig cacheConf, boolean primaryReplicaReader, Configuration conf) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java index cd347f43822..0108c145f75 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java @@ -151,11 +151,10 @@ public class HMasterCommandLine extends ServerCommandLine { if (shutDownCluster) { return stopMaster(); } - System.err.println( - "To shutdown the master run " + - "hbase-daemon.sh stop master or send a kill signal to " + - "the HMaster pid, " + - "and to stop HBase Cluster run \"stop-hbase.sh\" or \"hbase master stop --shutDownCluster\""); + System.err.println("To shutdown the master run " + + "hbase-daemon.sh stop master or send a kill signal to the HMaster pid, " + + "and to stop HBase Cluster run \"stop-hbase.sh\" or \"hbase master " + + "stop --shutDownCluster\""); return 1; } else if ("clear".equals(command)) { return (ZNodeClearer.clear(getConf()) ? 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
index 25b661c69ce..02863b843ed 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
@@ -297,9 +297,9 @@ public class MasterWalManager {
splitLog(serverNames, META_FILTER);
}
- @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="UL_UNRELEASED_LOCK", justification=
- "We only release this lock when we set it. Updates to code that uses it should verify use " +
- "of the guard boolean.")
+ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "UL_UNRELEASED_LOCK",
+ justification = "We only release this lock when we set it. Updates to code "
+ + "that uses it should verify use of the guard boolean.")
List getLogDirs(final Set serverNames) throws IOException {
List logDirs = new ArrayList<>();
boolean needReleaseLock = false;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
index 186a8ff11bb..16185cb7949 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
@@ -16,6 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.master;
+
import static org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective.CHECK;
import static org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective.FORCE;
import static org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus.DELETED;
@@ -155,10 +156,10 @@ public class SplitLogManager {
/**
* Get a list of paths that need to be split given a set of server-specific directories and
* optionally a filter.
- *
+ *
* See {@link AbstractFSWALProvider#getServerNameFromWALDirectoryName} for more info on directory
* layout.
- *
+ *
* Should be package-private, but is needed by
* {@link org.apache.hadoop.hbase.wal.WALSplitter#split(Path, Path, Path, FileSystem,
* Configuration, org.apache.hadoop.hbase.wal.WALFactory)} for tests.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index 291c419bf9f..73c1ff20371 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ -268,11 +268,11 @@ class SimpleRegionNormalizer implements RegionNormalizer, ConfigurationObserver } /** - * @param tableRegions regions of table to normalize + * Also make sure tableRegions contains regions of the same table + * @param tableRegions regions of table to normalize * @param tableDescriptor the TableDescriptor * @return average region size depending on - * @see org.apache.hadoop.hbase.client.TableDescriptor#getNormalizerTargetRegionCount() - * Also make sure tableRegions contains regions of the same table + * @see TableDescriptor#getNormalizerTargetRegionCount() */ private double getAverageRegionSizeMb(final List tableRegions, final TableDescriptor tableDescriptor) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java index 8157af99ba4..8d583bdb295 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java @@ -61,8 +61,9 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CloneSnapshotState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; @InterfaceAudience.Private @@ -267,11 +268,10 @@ public class CloneSnapshotProcedure throws IOException { super.serializeStateData(serializer); - MasterProcedureProtos.CloneSnapshotStateData.Builder cloneSnapshotMsg = - MasterProcedureProtos.CloneSnapshotStateData.newBuilder() - .setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser())) - .setSnapshot(this.snapshot) - .setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor)); + CloneSnapshotStateData.Builder cloneSnapshotMsg = CloneSnapshotStateData.newBuilder() + .setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser())) + .setSnapshot(this.snapshot) + .setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor)); cloneSnapshotMsg.setRestoreAcl(restoreAcl); if (newRegions != null) { @@ -285,11 +285,11 @@ public class CloneSnapshotProcedure while (it.hasNext()) { final Map.Entry > entry = it.next(); - MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder parentToChildrenPair = - MasterProcedureProtos.RestoreParentToChildRegionsPair.newBuilder() - .setParentRegionName(entry.getKey()) - .setChild1RegionName(entry.getValue().getFirst()) - .setChild2RegionName(entry.getValue().getSecond()); + RestoreParentToChildRegionsPair.Builder parentToChildrenPair = + 
RestoreParentToChildRegionsPair.newBuilder() + .setParentRegionName(entry.getKey()) + .setChild1RegionName(entry.getValue().getFirst()) + .setChild2RegionName(entry.getValue().getSecond()); cloneSnapshotMsg.addParentToChildRegionsPairList(parentToChildrenPair); } } @@ -301,8 +301,7 @@ public class CloneSnapshotProcedure throws IOException { super.deserializeStateData(serializer); - MasterProcedureProtos.CloneSnapshotStateData cloneSnapshotMsg = - serializer.deserialize(MasterProcedureProtos.CloneSnapshotStateData.class); + CloneSnapshotStateData cloneSnapshotMsg = serializer.deserialize(CloneSnapshotStateData.class); setUser(MasterProcedureUtil.toUserInfo(cloneSnapshotMsg.getUserInfo())); snapshot = cloneSnapshotMsg.getSnapshot(); tableDescriptor = ProtobufUtil.toTableDescriptor(cloneSnapshotMsg.getTableSchema()); @@ -319,8 +318,8 @@ public class CloneSnapshotProcedure } if (cloneSnapshotMsg.getParentToChildRegionsPairListCount() > 0) { parentsToChildrenPairMap = new HashMap<>(); - for (MasterProcedureProtos.RestoreParentToChildRegionsPair parentToChildrenPair: - cloneSnapshotMsg.getParentToChildRegionsPairListList()) { + for (RestoreParentToChildRegionsPair parentToChildrenPair : cloneSnapshotMsg + .getParentToChildRegionsPairListList()) { parentsToChildrenPairMap.put( parentToChildrenPair.getParentRegionName(), new Pair<>( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java index e9440621e44..8f2a9c86afb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java @@ -57,8 +57,9 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RestoreSnapshotState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; @InterfaceAudience.Private @@ -237,11 +238,10 @@ public class RestoreSnapshotProcedure throws IOException { super.serializeStateData(serializer); - MasterProcedureProtos.RestoreSnapshotStateData.Builder restoreSnapshotMsg = - MasterProcedureProtos.RestoreSnapshotStateData.newBuilder() - .setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser())) - .setSnapshot(this.snapshot) - .setModifiedTableSchema(ProtobufUtil.toTableSchema(modifiedTableDescriptor)); + RestoreSnapshotStateData.Builder restoreSnapshotMsg = RestoreSnapshotStateData.newBuilder() + .setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser())) + .setSnapshot(this.snapshot) + .setModifiedTableSchema(ProtobufUtil.toTableSchema(modifiedTableDescriptor)); if (regionsToRestore != null) { for (RegionInfo hri: regionsToRestore) { @@ -264,11 +264,11 @@ public class RestoreSnapshotProcedure while (it.hasNext()) { final Map.Entry > entry = it.next(); - MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder parentToChildrenPair = - MasterProcedureProtos.RestoreParentToChildRegionsPair.newBuilder() 
- .setParentRegionName(entry.getKey()) - .setChild1RegionName(entry.getValue().getFirst()) - .setChild2RegionName(entry.getValue().getSecond()); + RestoreParentToChildRegionsPair.Builder parentToChildrenPair = + RestoreParentToChildRegionsPair.newBuilder() + .setParentRegionName(entry.getKey()) + .setChild1RegionName(entry.getValue().getFirst()) + .setChild2RegionName(entry.getValue().getSecond()); restoreSnapshotMsg.addParentToChildRegionsPairList (parentToChildrenPair); } } @@ -281,8 +281,8 @@ public class RestoreSnapshotProcedure throws IOException { super.deserializeStateData(serializer); - MasterProcedureProtos.RestoreSnapshotStateData restoreSnapshotMsg = - serializer.deserialize(MasterProcedureProtos.RestoreSnapshotStateData.class); + RestoreSnapshotStateData restoreSnapshotMsg = + serializer.deserialize(RestoreSnapshotStateData.class); setUser(MasterProcedureUtil.toUserInfo(restoreSnapshotMsg.getUserInfo())); snapshot = restoreSnapshotMsg.getSnapshot(); modifiedTableDescriptor = @@ -313,8 +313,8 @@ public class RestoreSnapshotProcedure } } if (restoreSnapshotMsg.getParentToChildRegionsPairListCount() > 0) { - for (MasterProcedureProtos.RestoreParentToChildRegionsPair parentToChildrenPair: - restoreSnapshotMsg.getParentToChildRegionsPairListList()) { + for (RestoreParentToChildRegionsPair parentToChildrenPair : restoreSnapshotMsg + .getParentToChildRegionsPairListList()) { parentsToChildrenPairMap.put( parentToChildrenPair.getParentRegionName(), new Pair<>( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/impl/BalancerRejectionQueueService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/impl/BalancerRejectionQueueService.java index 6da708381af..9e922ee4393 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/impl/BalancerRejectionQueueService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/impl/BalancerRejectionQueueService.java @@ -79,8 +79,8 @@ public class BalancerRejectionQueueService implements NamedQueueService { return; } if (!(namedQueuePayload instanceof BalancerRejectionDetails)) { - LOG.warn( - "BalancerRejectionQueueService: NamedQueuePayload is not of type BalancerRejectionDetails."); + LOG.warn("BalancerRejectionQueueService: NamedQueuePayload is not of type" + + " BalancerRejectionDetails."); return; } BalancerRejectionDetails balancerRejectionDetails = (BalancerRejectionDetails) namedQueuePayload; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java index b285e049bdf..e17f8996431 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java @@ -34,9 +34,10 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; */ @InterfaceAudience.Private @InterfaceStability.Evolving -@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC", - justification="FindBugs seems confused; says bypassGlobals, namepaceLimiters, and " + - "tableLimiters are mostly synchronized...but to me it looks like they are totally synchronized") +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "IS2_INCONSISTENT_SYNC", + justification = "FindBugs seems confused; says bypassGlobals, namepaceLimiters, and " + + "tableLimiters are mostly synchronized..." 
+ + "but to me it looks like they are totally synchronized") public class UserQuotaState extends QuotaState { private Map namespaceLimiters = null; private Map tableLimiters = null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java index 56dab21baf2..4ffb600dd09 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java @@ -55,9 +55,11 @@ public abstract class AbstractMemStore implements MemStore { protected RegionServicesForStores regionServices; + // @formatter:off public final static long FIXED_OVERHEAD = (long) ClassSize.OBJECT - + (5 * ClassSize.REFERENCE) - + (2 * Bytes.SIZEOF_LONG); // snapshotId, timeOfOldestEdit + + (5 * ClassSize.REFERENCE) + + (2 * Bytes.SIZEOF_LONG); // snapshotId, timeOfOldestEdit + // @formatter:on public final static long DEEP_OVERHEAD = FIXED_OVERHEAD; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 63eba8b1a30..01904c685d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -2057,8 +2057,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler, throw new ServiceException(ie); } // We are assigning meta, wait a little for regionserver to finish initialization. - int timeout = regionServer.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, - HConstants.DEFAULT_HBASE_RPC_TIMEOUT) >> 2; // Quarter of RPC timeout + // Default to quarter of RPC timeout + int timeout = regionServer.getConfiguration() + .getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT) >> 2; long endTime = System.currentTimeMillis() + timeout; synchronized (regionServer.online) { try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index a1d082e9701..5ac86970c9d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -200,10 +200,10 @@ public class ReplicationSource implements ReplicationSourceInterface { this.waitOnEndpointSeconds = this.conf.getInt(WAIT_ON_ENDPOINT_SECONDS, DEFAULT_WAIT_ON_ENDPOINT_SECONDS); decorateConf(); - this.sleepForRetries = - this.conf.getLong("replication.source.sleepforretries", 1000); // 1 second - this.maxRetriesMultiplier = - this.conf.getInt("replication.source.maxretriesmultiplier", 300); // 5 minutes @ 1 sec per + // 1 second + this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000); + // 5 minutes @ 1 sec per + this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300); this.queueSizePerGroup = this.conf.getInt("hbase.regionserver.maxlogs", 32); this.logQueue = new ReplicationSourceLogQueue(conf, metrics, this); this.queueStorage = queueStorage; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java 
index f188e7ba50d..a5c7b16f529 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java
@@ -80,14 +80,15 @@ public class ReplicationSourceShipper extends Thread {
this.walGroupId = walGroupId;
this.logQueue = logQueue;
this.source = source;
- this.sleepForRetries =
- this.conf.getLong("replication.source.sleepforretries", 1000); // 1 second
- this.maxRetriesMultiplier =
- this.conf.getInt("replication.source.maxretriesmultiplier", 300); // 5 minutes @ 1 sec per
+ // 1 second
+ this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000);
+ // 5 minutes @ 1 sec per
+ this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300);
+ // 20 seconds
this.getEntriesTimeout =
- this.conf.getInt("replication.source.getEntries.timeout", DEFAULT_TIMEOUT); // 20 seconds
+ this.conf.getInt("replication.source.getEntries.timeout", DEFAULT_TIMEOUT);
this.shipEditsTimeout = this.conf.getInt(HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT,
- HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT_DFAULT);
+ HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT_DFAULT);
}
@Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
index 11090448c7c..c61494e12c6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
@@ -106,10 +106,10 @@ class ReplicationSourceWALReader extends Thread {
int batchCount = conf.getInt("replication.source.nb.batches", 1);
this.totalBufferUsed = source.getSourceManager().getTotalBufferUsed();
this.totalBufferQuota = source.getSourceManager().getTotalBufferLimit();
- this.sleepForRetries =
- this.conf.getLong("replication.source.sleepforretries", 1000); // 1 second
- this.maxRetriesMultiplier =
- this.conf.getInt("replication.source.maxretriesmultiplier", 300); // 5 minutes @ 1 sec per
+ // 1 second
+ this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000);
+ // 5 minutes @ 1 sec per
+ this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300);
this.eofAutoRecovery = conf.getBoolean("replication.source.eof.autorecovery", false);
this.entryBatchQueue = new LinkedBlockingQueue<>(batchCount);
this.walGroupId = walGroupId;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java
index f540b536ee2..3f47a3cba5f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java
@@ -72,7 +72,7 @@ public class TokenUtil {
/**
- * See {@link ClientTokenUtil#toToken(org.apache.hadoop.security.token.Token)}.
+ * See {@link ClientTokenUtil#toToken(Token)}.
* @deprecated External users should not use this method. Please post on
* the HBase dev mailing list if you need this method. Internal
* HBase code should use {@link ClientTokenUtil} instead.
@@ -83,8 +83,7 @@ public class TokenUtil {
}
/**
- * See {@link ClientTokenUtil#obtainToken(org.apache.hadoop.hbase.client.Connection,
- * org.apache.hadoop.hbase.security.User)}.
+ * See {@link ClientTokenUtil#obtainToken(Connection, User)}.
* @deprecated External users should not use this method. Please post on
* the HBase dev mailing list if you need this method. Internal
* HBase code should use {@link ClientTokenUtil} instead.
@@ -96,8 +95,7 @@ public class TokenUtil {
}
/**
- * See {@link ClientTokenUtil#obtainAndCacheToken(org.apache.hadoop.hbase.client.Connection,
- * org.apache.hadoop.hbase.security.User)}.
+ * See {@link ClientTokenUtil#obtainAndCacheToken(Connection, User)}.
*/
public static void obtainAndCacheToken(final Connection conn, User user)
@@ -106,7 +104,7 @@ public class TokenUtil {
}
/**
- * See {@link ClientTokenUtil#toToken(org.apache.hadoop.security.token.Token)}.
+ * See {@link ClientTokenUtil#toToken(org.apache.hadoop.hbase.shaded.protobuf.generated.AuthenticationProtos.Token)}.
* @deprecated External users should not use this method. Please post on
* the HBase dev mailing list if you need this method. Internal
* HBase code should use {@link ClientTokenUtil} instead.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/EntryBuffers.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/EntryBuffers.java
index 908d3e6db77..4c1e5be695c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/EntryBuffers.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/EntryBuffers.java
@@ -167,8 +167,8 @@ public class EntryBuffers {
internify(entry);
entries.add(entry); // TODO linkedlist entry
- long incrHeap = entry.getEdit().heapSize() +
- ClassSize.align(2 * ClassSize.REFERENCE); // WALKey pointers
+ // entry size plus WALKey pointers
+ long incrHeap = entry.getEdit().heapSize() + ClassSize.align(2 * ClassSize.REFERENCE);
heapInBuffer += incrHeap;
return incrHeap;
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 7c11124711c..ff386cfa232 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -2920,9 +2920,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
if (jobConf == null) {
jobConf = mrCluster.createJobConf();
}
-
- jobConf.set("mapreduce.cluster.local.dir",
- conf.get("mapreduce.cluster.local.dir")); //Hadoop MiniMR overwrites this while it should not
+ // Hadoop MiniMR overwrites this while it should not
+ jobConf.set("mapreduce.cluster.local.dir", conf.get("mapreduce.cluster.local.dir"));
LOG.info("Mini mapreduce cluster started");
// In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance and updates settings.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java index 9ab021aeb9d..53a84366d1d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java @@ -223,8 +223,9 @@ public class TestRegionObserverScannerOpenHook { Get get = new Get(ROW); Result r = region.get(get); assertNull( - "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: " - + r, r.listCells()); + "Got an unexpected number of rows - " + + "no data should be returned with the NoDataFromScan coprocessor. Found: " + r, + r.listCells()); HBaseTestingUtility.closeRegionAndWAL(region); } @@ -250,8 +251,9 @@ public class TestRegionObserverScannerOpenHook { Get get = new Get(ROW); Result r = region.get(get); assertNull( - "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: " - + r, r.listCells()); + "Got an unexpected number of rows - " + + "no data should be returned with the NoDataFromScan coprocessor. Found: " + r, + r.listCells()); HBaseTestingUtility.closeRegionAndWAL(region); } @@ -269,15 +271,19 @@ public class TestRegionObserverScannerOpenHook { } public CountDownLatch getCompactionStateChangeLatch() { - if (compactionStateChangeLatch == null) compactionStateChangeLatch = new CountDownLatch(1); + if (compactionStateChangeLatch == null) { + compactionStateChangeLatch = new CountDownLatch(1); + } return compactionStateChangeLatch; } @Override public boolean compact(CompactionContext compaction, HStore store, - ThroughputController throughputController) throws IOException { + ThroughputController throughputController) throws IOException { boolean ret = super.compact(compaction, store, throughputController); - if (ret) compactionStateChangeLatch.countDown(); + if (ret) { + compactionStateChangeLatch.countDown(); + } return ret; } @@ -341,14 +347,16 @@ public class TestRegionObserverScannerOpenHook { Get get = new Get(ROW); Result r = table.get(get); assertNull( - "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: " - + r, r.listCells()); + "Got an unexpected number of rows - " + + "no data should be returned with the NoDataFromScan coprocessor. 
Found: " + r, + r.listCells()); get = new Get(Bytes.toBytes("anotherrow")); r = table.get(get); assertNull( - "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor Found: " - + r, r.listCells()); + "Got an unexpected number of rows - " + + "no data should be returned with the NoDataFromScan coprocessor Found: " + r, + r.listCells()); table.close(); UTIL.shutdownMiniCluster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java index a42410a2aab..289ed670841 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java @@ -190,8 +190,9 @@ public class TestFavoredNodeAssignmentHelper { // the primary can be assigned but the secondary/tertiary would be null Map rackToServerCount = new HashMap<>(); rackToServerCount.put("rack1", 1); - Triple