public Map<byte[], NavigableSet<byte[]>> getFamilyMap() {
return this.familyMap;
@@ -638,16 +637,12 @@ public class Scan extends Query {
return this.caching;
}
- /**
- * n
- */
+ /** Returns TimeRange */
public TimeRange getTimeRange() {
return this.tr;
}
- /**
- * n
- */
+ /** Returns RowFilter */
@Override
public Filter getFilter() {
return filter;
@@ -682,7 +677,7 @@ public class Scan extends Query {
* Set whether this scan is a reversed one
*
* This is false by default which means forward(normal) scan.
- * @param reversed if true, scan will be backward order n
+ * @param reversed if true, scan will be backward order
*/
public Scan setReversed(boolean reversed) {
this.reversed = reversed;
@@ -701,7 +696,8 @@ public class Scan extends Query {
* Setting whether the caller wants to see the partial results when server returns
* less-than-expected cells. It is helpful while scanning a huge row to prevent OOM at client. By
* default this value is false and the complete results will be assembled client side before being
- * delivered to the caller. nn * @see Result#mayHaveMoreCellsInRow()
+ * delivered to the caller.
+ * @see Result#mayHaveMoreCellsInRow()
* @see #setBatch(int)
*/
public Scan setAllowPartialResults(final boolean allowPartialResults) {
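For orientation, a minimal client-side sketch of the partial-results mode described above; it assumes a Table named table is already open, and the family name and batch size are illustrative only:
// Read a very wide row in pieces so the client never assembles the whole row at once.
Scan scan = new Scan()
  .setAllowPartialResults(true)
  .setBatch(100); // at most 100 cells per returned Result
try (ResultScanner scanner = table.getScanner(scan)) {
  for (Result partial : scanner) {
    // mayHaveMoreCellsInRow() stays true while more cells of the same row are still coming.
    boolean more = partial.mayHaveMoreCellsInRow();
    // process the partial Result here
  }
}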
@@ -725,7 +721,7 @@ public class Scan extends Query {
/**
* Compile the table and column family (i.e. schema) information into a String. Useful for parsing
- * and aggregation by debugging, logging, and administration tools. n
+ * and aggregation by debugging, logging, and administration tools.
*/
@Override
public Map<String, Object> getFingerprint() {
@@ -747,7 +743,7 @@ public class Scan extends Query {
* Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
* Map along with the fingerprinted information. Useful for debugging, logging, and administration
* tools.
- * @param maxCols a limit on the number of columns output prior to truncation n
+ * @param maxCols a limit on the number of columns output prior to truncation
*/
@Override
public Map<String, Object> toMap(int maxCols) {
@@ -904,7 +900,7 @@ public class Scan extends Query {
* reaches this value.
*
* This condition will be tested at last, after all other conditions such as stopRow, filter, etc.
- * @param limit the limit of rows for this scan n
+ * @param limit the limit of rows for this scan
*/
public Scan setLimit(int limit) {
this.limit = limit;
@@ -913,7 +909,7 @@ public class Scan extends Query {
/**
* Call this when you only want to get one row. It will set {@code limit} to {@code 1}, and also
- * set {@code readType} to {@link ReadType#PREAD}. n
+ * set {@code readType} to {@link ReadType#PREAD}.
*/
public Scan setOneRowLimit() {
return setLimit(1).setReadType(ReadType.PREAD);
@@ -935,7 +931,7 @@ public class Scan extends Query {
* Set the read type for this scan.
*
* Notice that we may choose to use pread even if you specific {@link ReadType#STREAM} here. For
- * example, we will always use pread if this is a get scan. n
+ * example, we will always use pread if this is a get scan.
*/
public Scan setReadType(ReadType readType) {
this.readType = readType;
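Taken together, a hedged sketch of how the limit and read-type setters above are typically combined (row key and limit values are invented):
// Point-style probe: first row at or after the start key, read with pread.
Scan probe = new Scan()
  .withStartRow(Bytes.toBytes("user-42"))
  .setOneRowLimit();            // equivalent to setLimit(1).setReadType(ReadType.PREAD)
// Bounded bulk scan that prefers streaming reads.
Scan bulk = new Scan()
  .setLimit(10000)
  .setReadType(Scan.ReadType.STREAM);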
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
index 53c33a667c3..7feefc831ca 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
@@ -116,7 +116,8 @@ public interface Table extends Closeable {
* @param results Empty Object[], same size as actions. Provides access to partial results, in
* case an exception is thrown. A null in the result array means that the call for
* that action failed, even after retries. The order of the objects in the results
- * array corresponds to the order of actions in the request list. n * @since 0.90.0
+ * array corresponds to the order of actions in the request list.
+ * @since 0.90.0
*/
default void batch(final List<? extends Row> actions, final Object[] results)
throws IOException, InterruptedException {
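As a rough usage sketch of the batch contract documented above (the results array is pre-sized to the actions list; row, family and qualifier names are made up):
List<Row> actions = new ArrayList<>();
actions.add(new Put(Bytes.toBytes("r1"))
  .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
actions.add(new Get(Bytes.toBytes("r2")));
Object[] results = new Object[actions.size()]; // must be the same size as actions
table.batch(actions, results);                 // throws IOException / InterruptedException
// results[i] == null means the i-th action failed even after retries;
// results are ordered exactly like the actions list.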
@@ -264,8 +265,8 @@ public interface Table extends Closeable {
* @apiNote In 3.0.0 version, the input list {@code deletes} will no longer be modified. Also,
* {@link #put(List)} runs pre-flight validations on the input list on client. Currently
* {@link #delete(List)} doesn't run validations on the client, there is no need
- * currently, but this may change in the future. An * {@link IllegalArgumentException}
- * will be thrown in this case.
+ * currently, but this may change in the future. An {@link IllegalArgumentException} will
+ * be thrown in this case.
*/
default void delete(List<Delete> deletes) throws IOException {
throw new NotImplementedException("Add an implementation!");
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
index f500a1128a5..1c91819ac4b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
@@ -153,7 +153,7 @@ public interface TableDescriptor {
String getRegionSplitPolicyClassName();
/**
- * Get the name of the table n
+ * Get the name of the table
*/
TableName getTableName();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
index d0d3e36aa8f..43ca935ffa1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
@@ -886,7 +886,7 @@ public class TableDescriptorBuilder {
}
/**
- * Get the name of the table n
+ * Get the name of the table
*/
@Override
public TableName getTableName() {
@@ -1299,7 +1299,8 @@ public class TableDescriptorBuilder {
* org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class
* can be loaded or not. Whether a coprocessor is loadable or not will be determined when a
* region is opened.
- * @param className Full class name. n * @return the modifyable TD
+ * @param className Full class name.
+ * @return the modifyable TD
*/
public ModifyableTableDescriptor setCoprocessor(String className) throws IOException {
return setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(className)
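A hedged sketch of how this coprocessor hook is normally reached through the public builder API; the observer class name below is a placeholder and is only checked for loadability when a region opens:
TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
  .setCoprocessor("org.example.MyRegionObserver") // placeholder class name; declares IOException
  .build();
admin.createTable(td);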
@@ -1347,8 +1348,8 @@ public class TableDescriptorBuilder {
* org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class
* can be loaded or not. Whether a coprocessor is loadable or not will be determined when a
* region is opened.
- * @param specStr The Coprocessor specification all in in one String n * @return the modifyable
- * TD
+ * @param specStr The Coprocessor specification all in one String
+ * @return the modifyable TD
* @deprecated used by HTableDescriptor and admin.rb. As of release 2.0.0, this will be removed
* in HBase 3.0.0.
*/
@@ -1461,8 +1462,8 @@ public class TableDescriptorBuilder {
/**
* @param bytes A pb serialized {@link ModifyableTableDescriptor} instance with pb magic prefix
- * @return An instance of {@link ModifyableTableDescriptor} made from bytes
n
- * * @see #toByteArray()
+ * @return An instance of {@link ModifyableTableDescriptor} made from bytes
+ * @see #toByteArray()
*/
private static TableDescriptor parseFrom(final byte[] bytes) throws DeserializationException {
if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
index 4e20302be45..bf54f6e5904 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
@@ -42,7 +42,7 @@ public class TableState {
/**
* Covert from PB version of State
- * @param state convert from n
+ * @param state convert from
*/
public static State convert(HBaseProtos.TableState.State state) {
State ret;
@@ -66,7 +66,7 @@ public class TableState {
}
/**
- * Covert to PB version of State n
+ * Covert to PB version of State
*/
public HBaseProtos.TableState.State convert() {
HBaseProtos.TableState.State state;
@@ -140,7 +140,7 @@ public class TableState {
}
/**
- * Table name for state n
+ * Table name for state
*/
public TableName getTableName() {
return tableName;
@@ -168,7 +168,7 @@ public class TableState {
}
/**
- * Covert to PB version of TableState n
+ * Covert to PB version of TableState
*/
public HBaseProtos.TableState convert() {
return HBaseProtos.TableState.newBuilder().setState(this.state.convert()).build();
@@ -177,7 +177,7 @@ public class TableState {
/**
* Covert from PB version of TableState
* @param tableName table this state of
- * @param tableState convert from n
+ * @param tableState convert from
*/
public static TableState convert(TableName tableName, HBaseProtos.TableState tableState) {
TableState.State state = State.convert(tableState.getState());
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
index ab5915ec975..76a0d6addf3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
@@ -33,7 +33,7 @@ public class ServerStatistics {
/**
* Good enough attempt. Last writer wins. It doesn't really matter which one gets to update, as
- * something gets set nn
+ * something gets set
*/
public void update(byte[] region, RegionLoadStats currentStats) {
RegionStatistics regionStat = this.stats.get(region);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java
index 7a266de3345..c705463b62c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java
@@ -35,8 +35,8 @@ public class ServerSideScanMetrics {
private final Map<String, AtomicLong> counters = new HashMap<>();
/**
- * Create a new counter with the specified name n * @return {@link AtomicLong} instance for the
- * counter with counterName
+ * Create a new counter with the specified name
+ * @return {@link AtomicLong} instance for the counter with counterName
*/
protected AtomicLong createCounter(String counterName) {
AtomicLong c = new AtomicLong(0);
@@ -59,9 +59,6 @@ public class ServerSideScanMetrics {
*/
public final AtomicLong countOfRowsScanned = createCounter(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME);
- /**
- * nn
- */
public void setCounter(String counterName, long value) {
AtomicLong c = this.counters.get(counterName);
if (c != null) {
@@ -69,23 +66,16 @@ public class ServerSideScanMetrics {
}
}
- /**
- * n * @return true if a counter exists with the counterName
- */
+ /** Returns true if a counter exists with the counterName */
public boolean hasCounter(String counterName) {
return this.counters.containsKey(counterName);
}
- /**
- * n * @return {@link AtomicLong} instance for this counter name, null if counter does not exist.
- */
+ /** Returns {@link AtomicLong} instance for this counter name, null if counter does not exist. */
public AtomicLong getCounter(String counterName) {
return this.counters.get(counterName);
}
- /**
- * nn
- */
public void addToCounter(String counterName, long delta) {
AtomicLong c = this.counters.get(counterName);
if (c != null) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java
index 73e3b53eb36..c8eab212446 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java
@@ -52,36 +52,27 @@ import org.apache.hbase.thirdparty.com.google.protobuf.Message;
public abstract class ColumnInterpreter<T, S, P extends Message, Q extends Message, R extends Message> {
- /**
- * nnn * @return value of type T n
- */
+ /** Returns value of type T */
public abstract T getValue(byte[] colFamily, byte[] colQualifier, Cell c) throws IOException;
- /**
- * nn * @return sum or non null value among (if either of them is null); otherwise returns a null.
- */
+ /** Returns sum or non null value among (if either of them is null); otherwise returns a null. */
public abstract S add(S l1, S l2);
/**
- * returns the maximum value for this type T n
+ * returns the maximum value for this type T
*/
-
public abstract T getMaxValue();
public abstract T getMinValue();
- /**
- * nnn
- */
+ /** Returns multiplication */
public abstract S multiply(S o1, S o2);
- /**
- * nn
- */
+ /** Returns increment */
public abstract S increment(S o);
/**
- * provides casting opportunity between the data types. nn
+ * provides casting opportunity between the data types.
*/
public abstract S castToReturnType(T o);
@@ -96,7 +87,7 @@ public abstract class ColumnInterpreter clazz, String s) {
super("Coprocessor [" + clazz.getName() + "]: " + s);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java
index fd9936dc502..5f2b98c8370 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java
@@ -150,7 +150,7 @@ public final class ClientExceptionsUtil {
/**
* Translates exception for preemptive fast fail checks.
* @param t exception to check
- * @return translated exception n
+ * @return translated exception
*/
public static Throwable translatePFFE(Throwable t) throws IOException {
if (t instanceof NoSuchMethodError) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java
index ae15777a7f0..00774e37094 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java
@@ -35,15 +35,13 @@ public class FailedSanityCheckException extends org.apache.hadoop.hbase.DoNotRet
}
/**
- * n
- */
+ * */
public FailedSanityCheckException(String message) {
super(message);
}
/**
- * nn
- */
+ * */
public FailedSanityCheckException(String message, Throwable cause) {
super(message, cause);
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java
index e7c06d44aef..1991100d0da 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java
@@ -68,9 +68,7 @@ public class ColumnValueFilter extends FilterBase {
this.comparator = Preconditions.checkNotNull(comparator, "Comparator should not be null");
}
- /**
- * n
- */
+ /** Returns operator */
public CompareOperator getCompareOperator() {
return op;
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java
index a5f5efcaba1..8140793fc77 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java
@@ -216,8 +216,9 @@ public abstract class Filter {
/**
* Concrete implementers can signal a failure condition in their code by throwing an
- * {@link IOException}. n * @return true if and only if the fields of the filter that are
- * serialized are equal to the corresponding fields in other. Used for testing.
+ * {@link IOException}.
+ * @return true if and only if the fields of the filter that are serialized are equal to the
+ * corresponding fields in other. Used for testing.
*/
abstract boolean areSerializedFieldsEqual(Filter other);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java
index ff637c7f052..713c4acb270 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java
@@ -134,9 +134,9 @@ public abstract class FilterBase extends Filter {
}
/**
- * Default implementation so that writers of custom filters aren't forced to implement. n
- * * @return true if and only if the fields of the filter that are serialized are equal to the
- * corresponding fields in other. Used for testing.
+ * Default implementation so that writers of custom filters aren't forced to implement.
+ * @return true if and only if the fields of the filter that are serialized are equal to the
+ * corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter other) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
index 3b7c136c6e1..cb42072e1d8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
@@ -84,7 +84,7 @@ final public class FilterList extends FilterBase {
/**
* Constructor that takes a var arg number of {@link Filter}s. The default operator MUST_PASS_ALL
- * is assumed. n
+ * is assumed.
*/
public FilterList(final Filter... filters) {
this(Operator.MUST_PASS_ALL, Arrays.asList(filters));
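As an illustrative sketch of the var-arg constructor above (filters and values are examples; MUST_PASS_ALL is a logical AND, MUST_PASS_ONE a logical OR):
Filter combined = new FilterList(
  new PrefixFilter(Bytes.toBytes("user-")),
  new SingleColumnValueFilter(Bytes.toBytes("cf"), Bytes.toBytes("status"),
    CompareOperator.EQUAL, Bytes.toBytes("active")));
scan.setFilter(combined); // assumes an existing Scan named scan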
@@ -108,14 +108,14 @@ final public class FilterList extends FilterBase {
}
/**
- * Get the operator. n
+ * Get the operator.
*/
public Operator getOperator() {
return operator;
}
/**
- * Get the filters. n
+ * Get the filters.
*/
public List<Filter> getFilters() {
return filterListBase.getFilters();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java
index 4a15af27726..9b0fd99dc94 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java
@@ -92,7 +92,7 @@ public abstract class FilterListBase extends FilterBase {
* the current child, we should set the traverse result (transformed cell) of previous node(s) as
* the initial value. (HBASE-18879).
* @param c The cell in question.
- * @return the transformed cell. n
+ * @return the transformed cell.
*/
@Override
public Cell transformCell(Cell c) throws IOException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
index 1506eca5df6..2feac5527f7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
@@ -116,7 +116,8 @@ public class FuzzyRowFilter extends FilterBase {
/**
* We need to preprocess mask array, as since we treat 2's as unfixed positions and -1 (0xff) as
- * fixed positions n * @return mask array
+ * fixed positions
+ * @return mask array
*/
private byte[] preprocessMask(byte[] mask) {
if (!UNSAFE_UNALIGNED) {
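For orientation, a hedged sketch of the user-facing mask convention this preprocessing converts into the -1/2 form described above; in the mask supplied to the filter, 0 marks a fixed byte and 1 a fuzzy byte (the key and mask below are invented):
// Match 9-byte rows shaped like "????_0003", where the first four bytes may vary.
byte[] fuzzyKey = Bytes.toBytesBinary("\\x00\\x00\\x00\\x00_0003");
byte[] mask = new byte[] { 1, 1, 1, 1, 0, 0, 0, 0, 0 };
Filter fuzzy = new FuzzyRowFilter(Arrays.asList(new Pair<>(fuzzyKey, mask)));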
@@ -588,8 +589,8 @@ public class FuzzyRowFilter extends FilterBase {
/**
* For forward scanner, next cell hint should not contain any trailing zeroes unless they are part
- * of fuzzyKeyMeta hint = '\x01\x01\x01\x00\x00' will skip valid row '\x01\x01\x01' nn * @param
- * toInc - position of incremented byte
+ * of fuzzyKeyMeta hint = '\x01\x01\x01\x00\x00' will skip valid row '\x01\x01\x01'
+ * @param toInc - position of incremented byte
* @return trimmed version of result
*/
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java
index 099f38026fe..1fdf051941a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java
@@ -37,7 +37,7 @@ public class RandomRowFilter extends FilterBase {
protected boolean filterOutRow;
/**
- * Create a new filter with a specified chance for a row to be included. n
+ * Create a new filter with a specified chance for a row to be included.
*/
public RandomRowFilter(float chance) {
this.chance = chance;
@@ -49,7 +49,7 @@ public class RandomRowFilter extends FilterBase {
}
/**
- * Set the chance that a row is included. n
+ * Set the chance that a row is included.
*/
public void setChance(float chance) {
this.chance = chance;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
index 14bdc04a754..3293a2106a9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
@@ -71,7 +71,7 @@ public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter {
}
/**
- * Constructor for protobuf deserialization only. nnnnnn
+ * Constructor for protobuf deserialization only.
*/
protected SingleColumnValueExcludeFilter(final byte[] family, final byte[] qualifier,
final CompareOperator op, ByteArrayComparable comparator, final boolean filterIfMissing,
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
index 7be5ce91405..43b3316db77 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
@@ -111,7 +111,7 @@ public class SingleColumnValueFilter extends FilterBase {
}
/**
- * Constructor for protobuf deserialization only. nnnnnn
+ * Constructor for protobuf deserialization only.
*/
protected SingleColumnValueFilter(final byte[] family, final byte[] qualifier,
final CompareOperator op, org.apache.hadoop.hbase.filter.ByteArrayComparable comparator,
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
index b3f821d75e4..235691ef7cb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
@@ -54,7 +54,7 @@ public class TimestampsFilter extends FilterBase {
long minTimestamp = Long.MAX_VALUE;
/**
- * Constructor for filter that retains only the specified timestamps in the list. n
+ * Constructor for filter that retains only the specified timestamps in the list.
*/
public TimestampsFilter(List<Long> timestamps) {
this(timestamps, false);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java
index b2b3698aa2c..e7364ca3b42 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java
@@ -104,9 +104,10 @@ class CellBlockBuilder {
/**
* Puts CellScanner Cells into a cell block using passed in codec and/or
- * compressor. nnn * @return Null or byte buffer filled with a cellblock filled with
- * passed-in Cells encoded using passed in codec and/or compressor; the
- * returned buffer has been flipped and is ready for reading. Use limit to find total size. n
+ * compressor.
+ * @return Null or byte buffer filled with a cellblock filled with passed-in Cells encoded using
+ * passed in codec and/or compressor; the returned buffer has
+ * been flipped and is ready for reading. Use limit to find total size.
*/
public ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor,
final CellScanner cellScanner) throws IOException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java
index d63f28cdab8..155c721b98a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java
@@ -34,8 +34,7 @@ public class LeaseException extends DoNotRetryIOException {
}
/**
- * n
- */
+ * */
public LeaseException(String message) {
super(message);
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java
index 2e2a3a895ce..c0330034810 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java
@@ -33,8 +33,7 @@ public class FailedLogCloseException extends IOException {
}
/**
- * n
- */
+ * */
public FailedLogCloseException(String msg) {
super(msg);
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedSyncBeforeLogCloseException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedSyncBeforeLogCloseException.java
index feab0b07f2f..a2a43203b64 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedSyncBeforeLogCloseException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedSyncBeforeLogCloseException.java
@@ -32,8 +32,7 @@ public class FailedSyncBeforeLogCloseException extends FailedLogCloseException {
}
/**
- * n
- */
+ * */
public FailedSyncBeforeLogCloseException(String msg) {
super(msg);
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java
index 92ca03945aa..87b2287a601 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java
@@ -50,7 +50,7 @@ public abstract class AbstractHBaseSaslRpcClient {
* @param token token to use if needed by the authentication method
* @param serverAddr the address of the hbase service
* @param securityInfo the security details for the remote hbase service
- * @param fallbackAllowed does the client allow fallback to simple authentication n
+ * @param fallbackAllowed does the client allow fallback to simple authentication
*/
protected AbstractHBaseSaslRpcClient(Configuration conf,
SaslClientAuthenticationProvider provider, Token<? extends TokenIdentifier> token,
@@ -66,7 +66,7 @@ public abstract class AbstractHBaseSaslRpcClient {
* @param serverAddr the address of the hbase service
* @param securityInfo the security details for the remote hbase service
* @param fallbackAllowed does the client allow fallback to simple authentication
- * @param rpcProtection the protection level ("authentication", "integrity" or "privacy") n
+ * @param rpcProtection the protection level ("authentication", "integrity" or "privacy")
*/
protected AbstractHBaseSaslRpcClient(Configuration conf,
SaslClientAuthenticationProvider provider, Token<? extends TokenIdentifier> token,
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
index 5a816877ba8..6c755f9a94c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
@@ -62,7 +62,7 @@ public final class EncryptionUtil {
* @param conf configuration
* @param key the raw key bytes
* @param algorithm the algorithm to use with this key material
- * @return the encrypted key bytes n
+ * @return the encrypted key bytes
*/
public static byte[] wrapKey(Configuration conf, byte[] key, String algorithm)
throws IOException {
@@ -115,7 +115,7 @@ public final class EncryptionUtil {
* @param conf configuration
* @param subject subject key alias
* @param value the encrypted key bytes
- * @return the raw key bytes nn
+ * @return the raw key bytes
*/
public static Key unwrapKey(Configuration conf, String subject, byte[] value)
throws IOException, KeyException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
index 93ad9245f65..0394bb0f2a3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
@@ -86,7 +86,7 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient {
* Do client side SASL authentication with server via the given InputStream and OutputStream
* @param inS InputStream to use
* @param outS OutputStream to use
- * @return true if connection is set up, or false if needs to switch to simple Auth. n
+ * @return true if connection is set up, or false if needs to switch to simple Auth.
*/
public boolean saslConnect(InputStream inS, OutputStream outS) throws IOException {
DataInputStream inStream = new DataInputStream(new BufferedInputStream(inS));
@@ -185,7 +185,7 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient {
/**
* Get a SASL wrapped InputStream. Can be called only after saslConnect() has been called.
- * @return a SASL wrapped InputStream n
+ * @return a SASL wrapped InputStream
*/
public InputStream getInputStream() throws IOException {
if (!saslClient.isComplete()) {
@@ -248,7 +248,7 @@ public class HBaseSaslRpcClient extends AbstractHBaseSaslRpcClient {
/**
* Get a SASL wrapped OutputStream. Can be called only after saslConnect() has been called.
- * @return a SASL wrapped OutputStream n
+ * @return a SASL wrapped OutputStream
*/
public OutputStream getOutputStream() throws IOException {
if (!saslClient.isComplete()) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
index e30041d46c4..2ea60f8ed57 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
@@ -45,7 +45,7 @@ public class AccessControlClient {
/**
* Return true if authorization is supported and enabled
* @param connection The connection to use
- * @return true if authorization is supported and enabled, false otherwise n
+ * @return true if authorization is supported and enabled, false otherwise
*/
public static boolean isAuthorizationEnabled(Connection connection) throws IOException {
return connection.getAdmin().getSecurityCapabilities()
@@ -55,7 +55,7 @@ public class AccessControlClient {
/**
* Return true if cell authorization is supported and enabled
* @param connection The connection to use
- * @return true if cell authorization is supported and enabled, false otherwise n
+ * @return true if cell authorization is supported and enabled, false otherwise
*/
public static boolean isCellAuthorizationEnabled(Connection connection) throws IOException {
return connection.getAdmin().getSecurityCapabilities()
@@ -146,7 +146,7 @@ public class AccessControlClient {
/**
* Grant global permissions for the specified user. If permissions for the specified user exists,
- * later granted permissions will override previous granted permissions. nnnn
+ * later granted permissions will override previous granted permissions.
*/
public static void grant(Connection connection, final String userName,
final Permission.Action... actions) throws Throwable {
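A hedged sketch of the global grant documented above; the connection setup, user name and action are illustrative, and grant() is declared to throw Throwable:
try (Connection conn = ConnectionFactory.createConnection(conf)) {
  AccessControlClient.grant(conn, "analyst", Permission.Action.READ);
} catch (Throwable t) {
  throw new IOException("grant failed", t);
}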
@@ -162,7 +162,7 @@ public class AccessControlClient {
/**
* Revokes the permission on the table
- * @param connection The Connection instance to use nnnnnn
+ * @param connection The Connection instance to use
*/
public static void revoke(Connection connection, final TableName tableName, final String username,
final byte[] family, final byte[] qualifier, final Permission.Action... actions)
@@ -173,7 +173,7 @@ public class AccessControlClient {
/**
* Revokes the permission on the namespace for the specified user.
- * @param connection The Connection instance to use nnnn
+ * @param connection The Connection instance to use
*/
public static void revoke(Connection connection, final String namespace, final String userName,
final Permission.Action... actions) throws Throwable {
@@ -197,7 +197,7 @@ public class AccessControlClient {
* along with the list of superusers would be returned. Else, no rows get returned.
* @param connection The Connection instance to use
* @param tableRegex The regular expression string to match against
- * @return List of UserPermissions n
+ * @return List of UserPermissions
*/
public static List<UserPermission> getUserPermissions(Connection connection, String tableRegex)
throws Throwable {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java
index e0eb79aa025..970c3f2b04d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java
@@ -450,8 +450,8 @@ public class AccessControlUtil {
* It's also called by the shell, in case you want to find references.
* @param protocol the AccessControlService protocol proxy
* @param userShortName the short name of the user to grant permissions
- * @param actions the permissions to be granted n * @deprecated Use
- * {@link Admin#grant(UserPermission, boolean)} instead.
+ * @param actions the permissions to be granted
+ * @deprecated Use {@link Admin#grant(UserPermission, boolean)} instead.
*/
@Deprecated
public static void grant(RpcController controller,
@@ -478,8 +478,8 @@ public class AccessControlUtil {
* @param tableName optional table name
* @param f optional column family
* @param q optional qualifier
- * @param actions the permissions to be granted n * @deprecated Use
- * {@link Admin#grant(UserPermission, boolean)} instead.
+ * @param actions the permissions to be granted
+ * @deprecated Use {@link Admin#grant(UserPermission, boolean)} instead.
*/
@Deprecated
public static void grant(RpcController controller,
@@ -504,8 +504,8 @@ public class AccessControlUtil {
* @param controller RpcController
* @param protocol the AccessControlService protocol proxy
* @param namespace the short name of the user to grant permissions
- * @param actions the permissions to be granted n * @deprecated Use
- * {@link Admin#grant(UserPermission, boolean)} instead.
+ * @param actions the permissions to be granted
+ * @deprecated Use {@link Admin#grant(UserPermission, boolean)} instead.
*/
@Deprecated
public static void grant(RpcController controller,
@@ -621,9 +621,8 @@ public class AccessControlUtil {
* A utility used to get user's global permissions based on the specified user name.
* @param controller RpcController
* @param protocol the AccessControlService protocol proxy
- * @param userName User name, if empty then all user permissions will be retrieved. n
- * * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)}
- * instead.
+ * @param userName User name, if empty then all user permissions will be retrieved.
+ * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
*/
@Deprecated
public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -651,8 +650,8 @@ public class AccessControlUtil {
* It's also called by the shell, in case you want to find references.
* @param controller RpcController
* @param protocol the AccessControlService protocol proxy
- * @param t optional table name n * @deprecated Use
- * {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
+ * @param t optional table name
+ * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
*/
@Deprecated
public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -668,9 +667,8 @@ public class AccessControlUtil {
* @param t optional table name
* @param columnFamily Column family
* @param columnQualifier Column qualifier
- * @param userName User name, if empty then all user permissions will be retrieved. n
- * * @deprecated Use
- * {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
+ * @param userName User name, if empty then all user permissions will be retrieved.
+ * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
*/
@Deprecated
public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -708,8 +706,8 @@ public class AccessControlUtil {
* It's also called by the shell, in case you want to find references.
* @param controller RpcController
* @param protocol the AccessControlService protocol proxy
- * @param namespace name of the namespace n * @deprecated Use
- * {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
+ * @param namespace name of the namespace
+ * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
*/
@Deprecated
public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -722,9 +720,8 @@ public class AccessControlUtil {
* @param controller RpcController
* @param protocol the AccessControlService protocol proxy
* @param namespace name of the namespace
- * @param userName User name, if empty then all user permissions will be retrieved. n
- * * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)}
- * instead.
+ * @param userName User name, if empty then all user permissions will be retrieved.
+ * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
*/
@Deprecated
public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -762,8 +759,8 @@ public class AccessControlUtil {
* will not be considered if columnFamily is passed as null or empty.
* @param userName User name, it shouldn't be null or empty.
* @param actions Actions
- * @return true if access allowed, otherwise false n * @deprecated Use
- * {@link Admin#hasUserPermissions(String, List)} instead.
+ * @return true if access allowed, otherwise false
+ * @deprecated Use {@link Admin#hasUserPermissions(String, List)} instead.
*/
@Deprecated
public static boolean hasPermission(RpcController controller,
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java
index 7bae98d59ba..931f976f2f4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java
@@ -55,7 +55,7 @@ public class VisibilityClient {
/**
* Return true if cell visibility features are supported and enabled
* @param connection The connection to use
- * @return true if cell visibility features are supported and enabled, false otherwise n
+ * @return true if cell visibility features are supported and enabled, false otherwise
*/
public static boolean isCellVisibilityEnabled(Connection connection) throws IOException {
return connection.getAdmin().getSecurityCapabilities()
@@ -63,7 +63,7 @@ public class VisibilityClient {
}
/**
- * Utility method for adding label to the system. nnnn
+ * Utility method for adding label to the system.
*/
public static VisibilityLabelsResponse addLabel(Connection connection, final String label)
throws Throwable {
@@ -71,7 +71,7 @@ public class VisibilityClient {
}
/**
- * Utility method for adding labels to the system. nnnn
+ * Utility method for adding labels to the system.
*/
public static VisibilityLabelsResponse addLabels(Connection connection, final String[] labels)
throws Throwable {
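A hedged sketch of the label bootstrap flow these helpers support (label names, user name and the existing connection are illustrative; both calls are declared to throw Throwable):
VisibilityClient.addLabels(connection, new String[] { "secret", "confidential" });
VisibilityClient.setAuths(connection, new String[] { "confidential" }, "bob");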
@@ -109,7 +109,7 @@ public class VisibilityClient {
}
/**
- * Sets given labels globally authorized for the user. nnnnn
+ * Sets given labels globally authorized for the user.
*/
public static VisibilityLabelsResponse setAuths(Connection connection, final String[] auths,
final String user) throws Throwable {
@@ -154,7 +154,7 @@ public class VisibilityClient {
* Retrieve the list of visibility labels defined in the system.
* @param connection The Connection instance to use.
* @param regex The regular expression to filter which labels are returned.
- * @return labels The list of visibility labels defined in the system. n
+ * @return labels The list of visibility labels defined in the system.
*/
public static ListLabelsResponse listLabels(Connection connection, final String regex)
throws Throwable {
@@ -190,7 +190,7 @@ public class VisibilityClient {
}
/**
- * Removes given labels from user's globally authorized list of labels. nnnnn
+ * Removes given labels from user's globally authorized list of labels.
*/
public static VisibilityLabelsResponse clearAuths(Connection connection, final String[] auths,
final String user) throws Throwable {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 35c361be562..079ddbb4218 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -562,7 +562,7 @@ public final class ProtobufUtil {
/**
* Convert a protocol buffer Get to a client Get
* @param proto the protocol buffer Get to convert
- * @return the converted client Get n
+ * @return the converted client Get
*/
public static Get toGet(final ClientProtos.Get proto) throws IOException {
if (proto == null) return null;
@@ -647,7 +647,7 @@ public final class ProtobufUtil {
/**
* Convert a protocol buffer Mutate to a Put.
* @param proto The protocol buffer MutationProto to convert
- * @return A client Put. n
+ * @return A client Put.
*/
public static Put toPut(final MutationProto proto) throws IOException {
return toPut(proto, null);
@@ -657,7 +657,7 @@ public final class ProtobufUtil {
* Convert a protocol buffer Mutate to a Put.
* @param proto The protocol buffer MutationProto to convert
* @param cellScanner If non-null, the Cell data that goes with this proto.
- * @return A client Put. n
+ * @return A client Put.
*/
public static Put toPut(final MutationProto proto, final CellScanner cellScanner)
throws IOException {
@@ -741,7 +741,7 @@ public final class ProtobufUtil {
/**
* Convert a protocol buffer Mutate to a Delete
* @param proto the protocol buffer Mutate to convert
- * @return the converted client Delete n
+ * @return the converted client Delete
*/
public static Delete toDelete(final MutationProto proto) throws IOException {
return toDelete(proto, null);
@@ -751,7 +751,7 @@ public final class ProtobufUtil {
* Convert a protocol buffer Mutate to a Delete
* @param proto the protocol buffer Mutate to convert
* @param cellScanner if non-null, the data that goes with this delete.
- * @return the converted client Delete n
+ * @return the converted client Delete
*/
public static Delete toDelete(final MutationProto proto, final CellScanner cellScanner)
throws IOException {
@@ -920,7 +920,7 @@ public final class ProtobufUtil {
/**
* Convert a MutateRequest to Mutation
* @param proto the protocol buffer Mutate to convert
- * @return the converted Mutation n
+ * @return the converted Mutation
*/
public static Mutation toMutation(final MutationProto proto) throws IOException {
MutationType type = proto.getMutateType();
@@ -968,7 +968,7 @@ public final class ProtobufUtil {
/**
* Convert a client Scan to a protocol buffer Scan
* @param scan the client Scan to convert
- * @return the converted protocol buffer Scan n
+ * @return the converted protocol buffer Scan
*/
public static ClientProtos.Scan toScan(final Scan scan) throws IOException {
ClientProtos.Scan.Builder scanBuilder = ClientProtos.Scan.newBuilder();
@@ -1062,7 +1062,7 @@ public final class ProtobufUtil {
/**
* Convert a protocol buffer Scan to a client Scan
* @param proto the protocol buffer Scan to convert
- * @return the converted client Scan n
+ * @return the converted client Scan
*/
public static Scan toScan(final ClientProtos.Scan proto) throws IOException {
byte[] startRow = HConstants.EMPTY_START_ROW;
@@ -1182,7 +1182,7 @@ public final class ProtobufUtil {
/**
* Create a protocol buffer Get based on a client Get.
* @param get the client Get
- * @return a protocol buffer Get n
+ * @return a protocol buffer Get
*/
public static ClientProtos.Get toGet(final Get get) throws IOException {
ClientProtos.Get.Builder builder = ClientProtos.Get.newBuilder();
@@ -1248,7 +1248,8 @@ public final class ProtobufUtil {
}
/**
- * Create a protocol buffer Mutate based on a client Mutation nn * @return a protobuf'd Mutation n
+ * Create a protocol buffer Mutate based on a client Mutation
+ * @return a protobuf'd Mutation
*/
public static MutationProto toMutation(final MutationType type, final Mutation mutation,
final long nonce) throws IOException {
@@ -1297,8 +1298,8 @@ public final class ProtobufUtil {
/**
* Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data.
- * Understanding is that the Cell will be transported other than via protobuf. nnn * @return a
- * protobuf'd Mutation n
+ * Understanding is that the Cell will be transported other than via protobuf.
+ * @return a protobuf'd Mutation
*/
public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation,
final MutationProto.Builder builder) throws IOException {
@@ -1307,8 +1308,8 @@ public final class ProtobufUtil {
/**
* Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data.
- * Understanding is that the Cell will be transported other than via protobuf. nn * @return a
- * protobuf'd Mutation n
+ * Understanding is that the Cell will be transported other than via protobuf.
+ * @return a protobuf'd Mutation
*/
public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation)
throws IOException {
@@ -1334,8 +1335,8 @@ public final class ProtobufUtil {
/**
* Code shared by {@link #toMutation(MutationType, Mutation)} and
- * {@link #toMutationNoData(MutationType, Mutation)} nn * @return A partly-filled out protobuf'd
- * Mutation.
+ * {@link #toMutationNoData(MutationType, Mutation)}
+ * @return A partly-filled out protobuf'd Mutation.
*/
private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final MutationType type,
final Mutation mutation, MutationProto.Builder builder) {
@@ -1468,7 +1469,7 @@ public final class ProtobufUtil {
* Convert a protocol buffer Result to a client Result
* @param proto the protocol buffer Result to convert
* @param scanner Optional cell scanner.
- * @return the converted client Result n
+ * @return the converted client Result
*/
public static Result toResult(final ClientProtos.Result proto, final CellScanner scanner)
throws IOException {
@@ -1583,8 +1584,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a delete KeyValue type to protocol buffer DeleteType. n * @return protocol buffer
- * DeleteType n
+ * Convert a delete KeyValue type to protocol buffer DeleteType.
+ * @return protocol buffer DeleteType
*/
public static DeleteType toDeleteType(KeyValue.Type type) throws IOException {
switch (type) {
@@ -1604,7 +1605,7 @@ public final class ProtobufUtil {
/**
* Convert a protocol buffer DeleteType to delete KeyValue type.
* @param type The DeleteType
- * @return The type. n
+ * @return The type.
*/
public static KeyValue.Type fromDeleteType(DeleteType type) throws IOException {
switch (type) {
@@ -1690,7 +1691,7 @@ public final class ProtobufUtil {
}
/**
- * A helper to close a region given a region name using admin protocol. nnn
+ * A helper to close a region given a region name using admin protocol.
*/
public static void closeRegion(final RpcController controller,
final AdminService.BlockingInterface admin, final ServerName server, final byte[] regionName)
@@ -1705,7 +1706,7 @@ public final class ProtobufUtil {
}
/**
- * A helper to warmup a region given a region name using admin protocol nn *
+ * A helper to warmup a region given a region name using admin protocol
*/
public static void warmupRegion(final RpcController controller,
final AdminService.BlockingInterface admin,
@@ -1722,7 +1723,7 @@ public final class ProtobufUtil {
}
/**
- * A helper to open a region using admin protocol. nnn
+ * A helper to open a region using admin protocol.
*/
public static void openRegion(final RpcController controller,
final AdminService.BlockingInterface admin, ServerName server,
@@ -1736,8 +1737,8 @@ public final class ProtobufUtil {
}
/**
- * A helper to get the all the online regions on a region server using admin protocol. n * @return
- * a list of online region info n
+ * A helper to get all the online regions on a region server using admin protocol.
+ * @return a list of online region info
*/
public static List<org.apache.hadoop.hbase.client.RegionInfo>
getOnlineRegions(final AdminService.BlockingInterface admin) throws IOException {
@@ -2069,7 +2070,8 @@ public final class ProtobufUtil {
/**
* Return short version of Message toString'd, shorter than TextFormat#shortDebugString. Tries to
* NOT print out data both because it can be big but also so we do not have data in our logs. Use
- * judiciously. n * @return toString of passed m
+ * judiciously.
+ * @return toString of passed m
*/
public static String getShortTextFormat(Message m) {
if (m == null) return "null";
@@ -2216,8 +2218,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a protocol buffer CellVisibility to a client CellVisibility n * @return the converted
- * client CellVisibility
+ * Convert a protocol buffer CellVisibility to a client CellVisibility
+ * @return the converted client CellVisibility
*/
public static CellVisibility toCellVisibility(ClientProtos.CellVisibility proto) {
if (proto == null) return null;
@@ -2225,8 +2227,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a protocol buffer CellVisibility bytes to a client CellVisibility n * @return the
- * converted client CellVisibility n
+ * Convert a protocol buffer CellVisibility bytes to a client CellVisibility
+ * @return the converted client CellVisibility
*/
public static CellVisibility toCellVisibility(byte[] protoBytes) throws DeserializationException {
if (protoBytes == null) return null;
@@ -2242,8 +2244,8 @@ public final class ProtobufUtil {
}
/**
- * Create a protocol buffer CellVisibility based on a client CellVisibility. n * @return a
- * protocol buffer CellVisibility
+ * Create a protocol buffer CellVisibility based on a client CellVisibility.
+ * @return a protocol buffer CellVisibility
*/
public static ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVisibility) {
ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder();
@@ -2252,8 +2254,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a protocol buffer Authorizations to a client Authorizations n * @return the converted
- * client Authorizations
+ * Convert a protocol buffer Authorizations to a client Authorizations
+ * @return the converted client Authorizations
*/
public static Authorizations toAuthorizations(ClientProtos.Authorizations proto) {
if (proto == null) return null;
@@ -2261,8 +2263,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a protocol buffer Authorizations bytes to a client Authorizations n * @return the
- * converted client Authorizations n
+ * Convert a protocol buffer Authorizations bytes to a client Authorizations
+ * @return the converted client Authorizations
*/
public static Authorizations toAuthorizations(byte[] protoBytes) throws DeserializationException {
if (protoBytes == null) return null;
@@ -2278,8 +2280,8 @@ public final class ProtobufUtil {
}
/**
- * Create a protocol buffer Authorizations based on a client Authorizations. n * @return a
- * protocol buffer Authorizations
+ * Create a protocol buffer Authorizations based on a client Authorizations.
+ * @return a protocol buffer Authorizations
*/
public static ClientProtos.Authorizations toAuthorizations(Authorizations authorizations) {
ClientProtos.Authorizations.Builder builder = ClientProtos.Authorizations.newBuilder();
@@ -2290,8 +2292,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a protocol buffer TimeUnit to a client TimeUnit n * @return the converted client
- * TimeUnit
+ * Convert a protocol buffer TimeUnit to a client TimeUnit
+ * @return the converted client TimeUnit
*/
public static TimeUnit toTimeUnit(final HBaseProtos.TimeUnit proto) {
switch (proto) {
@@ -2314,8 +2316,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a client TimeUnit to a protocol buffer TimeUnit n * @return the converted protocol
- * buffer TimeUnit
+ * Convert a client TimeUnit to a protocol buffer TimeUnit
+ * @return the converted protocol buffer TimeUnit
*/
public static HBaseProtos.TimeUnit toProtoTimeUnit(final TimeUnit timeUnit) {
switch (timeUnit) {
@@ -2338,8 +2340,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a protocol buffer ThrottleType to a client ThrottleType n * @return the converted
- * client ThrottleType
+ * Convert a protocol buffer ThrottleType to a client ThrottleType
+ * @return the converted client ThrottleType
*/
public static ThrottleType toThrottleType(final QuotaProtos.ThrottleType proto) {
switch (proto) {
@@ -2367,8 +2369,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a client ThrottleType to a protocol buffer ThrottleType n * @return the converted
- * protocol buffer ThrottleType
+ * Convert a client ThrottleType to a protocol buffer ThrottleType
+ * @return the converted protocol buffer ThrottleType
*/
public static QuotaProtos.ThrottleType toProtoThrottleType(final ThrottleType type) {
switch (type) {
@@ -2396,8 +2398,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a protocol buffer QuotaScope to a client QuotaScope n * @return the converted client
- * QuotaScope
+ * Convert a protocol buffer QuotaScope to a client QuotaScope
+ * @return the converted client QuotaScope
*/
public static QuotaScope toQuotaScope(final QuotaProtos.QuotaScope proto) {
switch (proto) {
@@ -2410,8 +2412,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a client QuotaScope to a protocol buffer QuotaScope n * @return the converted protocol
- * buffer QuotaScope
+ * Convert a client QuotaScope to a protocol buffer QuotaScope
+ * @return the converted protocol buffer QuotaScope
*/
public static QuotaProtos.QuotaScope toProtoQuotaScope(final QuotaScope scope) {
switch (scope) {
@@ -2424,8 +2426,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a protocol buffer QuotaType to a client QuotaType n * @return the converted client
- * QuotaType
+ * Convert a protocol buffer QuotaType to a client QuotaType
+ * @return the converted client QuotaType
*/
public static QuotaType toQuotaScope(final QuotaProtos.QuotaType proto) {
switch (proto) {
@@ -2438,8 +2440,8 @@ public final class ProtobufUtil {
}
/**
- * Convert a client QuotaType to a protocol buffer QuotaType n * @return the converted protocol
- * buffer QuotaType
+ * Convert a client QuotaType to a protocol buffer QuotaType
+ * @return the converted protocol buffer QuotaType
*/
public static QuotaProtos.QuotaType toProtoQuotaScope(final QuotaType type) {
switch (type) {
@@ -2566,7 +2568,7 @@ public final class ProtobufUtil {
* This version of protobuf's mergeDelimitedFrom avoid the hard-coded 64MB limit for decoding
* buffers
* @param builder current message builder
- * @param in Inputsream with delimited protobuf data n
+ * @param in InputStream with delimited protobuf data
*/
public static void mergeDelimitedFrom(Message.Builder builder, InputStream in)
throws IOException {
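A hedged sketch of how a caller might use this helper to read one length-delimited message without tripping protobuf's default 64MB decode limit (the relocated Message import and the generic wrapper method are assumptions, not part of this change):

    import java.io.IOException;
    import java.io.InputStream;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hbase.thirdparty.com.google.protobuf.Message;

    public final class DelimitedReader {
      /** Fills the supplied builder from one length-delimited protobuf message. */
      public static <B extends Message.Builder> B readOne(B builder, InputStream in)
          throws IOException {
        ProtobufUtil.mergeDelimitedFrom(builder, in); // no 64MB cap, per the Javadoc above
        return builder;
      }
    }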
@@ -2588,7 +2590,7 @@ public final class ProtobufUtil {
* where the message size is known
* @param builder current message builder
* @param in InputStream containing protobuf data
- * @param size known size of protobuf data n
+ * @param size known size of protobuf data
*/
public static void mergeFrom(Message.Builder builder, InputStream in, int size)
throws IOException {
@@ -2602,7 +2604,7 @@ public final class ProtobufUtil {
* This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers
* where the message size is not known
* @param builder current message builder
- * @param in InputStream containing protobuf data n
+ * @param in InputStream containing protobuf data
*/
public static void mergeFrom(Message.Builder builder, InputStream in) throws IOException {
final CodedInputStream codedInput = CodedInputStream.newInstance(in);
@@ -2615,7 +2617,7 @@ public final class ProtobufUtil {
* This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when
* working with ByteStrings
* @param builder current message builder
- * @param bs ByteString containing the n
+ * @param bs ByteString containing the protobuf data
*/
public static void mergeFrom(Message.Builder builder, ByteString bs) throws IOException {
final CodedInputStream codedInput = bs.newCodedInput();
@@ -2628,7 +2630,7 @@ public final class ProtobufUtil {
* This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when
* working with byte arrays
* @param builder current message builder
- * @param b byte array n
+ * @param b byte array
*/
public static void mergeFrom(Message.Builder builder, byte[] b) throws IOException {
final CodedInputStream codedInput = CodedInputStream.newInstance(b);
@@ -2641,7 +2643,7 @@ public final class ProtobufUtil {
* This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when
* working with byte arrays
* @param builder current message builder
- * @param b byte array nnn
+ * @param b byte array
*/
public static void mergeFrom(Message.Builder builder, byte[] b, int offset, int length)
throws IOException {
@@ -2821,7 +2823,7 @@ public final class ProtobufUtil {
/**
* Creates {@link CompactionState} from {@link GetRegionInfoResponse.CompactionState} state
- * @param state the protobuf CompactionState n
+ * @param state the protobuf CompactionState
*/
public static CompactionState createCompactionState(GetRegionInfoResponse.CompactionState state) {
return CompactionState.valueOf(state.toString());
@@ -2833,7 +2835,7 @@ public final class ProtobufUtil {
/**
* Creates {@link CompactionState} from {@link RegionLoad.CompactionState} state
- * @param state the protobuf CompactionState n
+ * @param state the protobuf CompactionState
*/
public static CompactionState
createCompactionStateForRegionLoad(RegionLoad.CompactionState state) {
@@ -2938,9 +2940,7 @@ public final class ProtobufUtil {
stats.getCompactionPressure());
}
- /**
- * n * @return A String version of the passed in msg
- */
+ /** Returns A String version of the passed in msg */
public static String toText(Message msg) {
return TextFormat.shortDebugString(msg);
}
@@ -2950,7 +2950,7 @@ public final class ProtobufUtil {
}
/**
- * Contain ServiceException inside here. Take a callable that is doing our pb rpc and run it. n
+ * Contain ServiceException inside here. Take a callable that is doing our pb rpc and run it.
*/
public static T call(Callable callable) throws IOException {
try {
@@ -3061,7 +3061,7 @@ public final class ProtobufUtil {
* magic and that is then followed by a protobuf that has a serialized
* {@link ServerName} in it.
* @return Returns null if data is null else converts passed data to a ServerName
- * instance. n
+ * instance.
*/
public static ServerName parseServerNameFrom(final byte[] data) throws DeserializationException {
if (data == null || data.length <= 0) return null;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index f678a43986d..9c88b61fd67 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -195,7 +195,7 @@ public final class RequestConverter {
/**
* Create a protocol buffer MutateRequest for a conditioned put/delete/increment/append
- * @return a mutate request n
+ * @return a mutate request
*/
public static MutateRequest buildMutateRequest(final byte[] regionName, final byte[] row,
final byte[] family, final byte[] qualifier, final CompareOperator op, final byte[] value,
@@ -215,7 +215,7 @@ public final class RequestConverter {
/**
* Create a protocol buffer MultiRequest for conditioned row mutations
- * @return a multi request n
+ * @return a multi request
*/
public static ClientProtos.MultiRequest buildMultiRequest(final byte[] regionName,
final byte[] row, final byte[] family, final byte[] qualifier, final CompareOperator op,
@@ -272,7 +272,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer MutateRequest for a put nn * @return a mutate request n
+ * Create a protocol buffer MutateRequest for a put
+ * @return a mutate request
*/
public static MutateRequest buildMutateRequest(final byte[] regionName, final Put put)
throws IOException {
@@ -284,7 +285,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer MutateRequest for an append nn * @return a mutate request n
+ * Create a protocol buffer MutateRequest for an append
+ * @return a mutate request
*/
public static MutateRequest buildMutateRequest(final byte[] regionName, final Append append,
long nonceGroup, long nonce) throws IOException {
@@ -300,7 +302,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer MutateRequest for a client increment nn * @return a mutate request
+ * Create a protocol buffer MutateRequest for a client increment
+ * @return a mutate request
*/
public static MutateRequest buildMutateRequest(final byte[] regionName, final Increment increment,
final long nonceGroup, final long nonce) throws IOException {
@@ -316,7 +319,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer MutateRequest for a delete nn * @return a mutate request n
+ * Create a protocol buffer MutateRequest for a delete
+ * @return a mutate request
*/
public static MutateRequest buildMutateRequest(final byte[] regionName, final Delete delete)
throws IOException {
@@ -336,7 +340,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer ScanRequest for a client Scan nnnn * @return a scan request n
+ * Create a protocol buffer ScanRequest for a client Scan
+ * @return a scan request
*/
public static ScanRequest buildScanRequest(byte[] regionName, Scan scan, int numberOfRows,
boolean closeScanner) throws IOException {
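As a usage sketch for the converter above, building a ScanRequest from a client-side Scan might look like this (the region name is a placeholder and the shaded generated-class imports are assumptions):

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScanRequestSketch {
      public static void main(String[] args) throws Exception {
        Scan scan = new Scan().withStartRow(Bytes.toBytes("row-0000")).setCaching(100);
        byte[] regionName = Bytes.toBytes("placeholder-region-name"); // not a real region name
        ScanRequest request = RequestConverter.buildScanRequest(regionName, scan, 100, false);
        System.out.println(request.hasScan()); // true
      }
    }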
@@ -356,7 +361,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer ScanRequest for a scanner id nnn * @return a scan request
+ * Create a protocol buffer ScanRequest for a scanner id
+ * @return a scan request
*/
public static ScanRequest buildScanRequest(long scannerId, int numberOfRows, boolean closeScanner,
boolean trackMetrics) {
@@ -371,7 +377,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer ScanRequest for a scanner id nnnn * @return a scan request
+ * Create a protocol buffer ScanRequest for a scanner id
+ * @return a scan request
*/
public static ScanRequest buildScanRequest(long scannerId, int numberOfRows, boolean closeScanner,
long nextCallSeq, boolean trackMetrics, boolean renew, int limitOfRows) {
@@ -391,7 +398,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer bulk load request nnnnnn * @return a bulk load request
+ * Create a protocol buffer bulk load request
+ * @return a bulk load request
*/
public static BulkLoadHFileRequest buildBulkLoadHFileRequest(
final List> familyPaths, final byte[] regionName, boolean assignSeqNum,
@@ -457,7 +465,7 @@ public final class RequestConverter {
* @param mutationBuilder mutationBuilder to be used to build mutation.
* @param nonceGroup nonceGroup to be applied.
* @param indexMap Map of created RegionAction to the original index for a
- * RowMutations/CheckAndMutate within the original list of actions n
+ * RowMutations/CheckAndMutate within the original list of actions
*/
public static void buildNoDataRegionActions(final byte[] regionName,
final Iterable actions, final List cells,
@@ -825,7 +833,8 @@ public final class RequestConverter {
/**
* Create a CompactRegionRequest for a given region name
* @param regionName the name of the region to get info
- * @param major indicator if it is a major compaction n * @return a CompactRegionRequest
+ * @param major indicator if it is a major compaction
+ * @return a CompactRegionRequest
*/
public static CompactRegionRequest buildCompactRegionRequest(byte[] regionName, boolean major,
byte[] columnFamily) {
@@ -883,7 +892,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer AddColumnRequest nn * @return an AddColumnRequest
+ * Create a protocol buffer AddColumnRequest
+ * @return an AddColumnRequest
*/
public static AddColumnRequest buildAddColumnRequest(final TableName tableName,
final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) {
@@ -896,7 +906,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer DeleteColumnRequest nn * @return a DeleteColumnRequest
+ * Create a protocol buffer DeleteColumnRequest
+ * @return a DeleteColumnRequest
*/
public static DeleteColumnRequest buildDeleteColumnRequest(final TableName tableName,
final byte[] columnName, final long nonceGroup, final long nonce) {
@@ -909,7 +920,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer ModifyColumnRequest nn * @return an ModifyColumnRequest
+ * Create a protocol buffer ModifyColumnRequest
+ * @return an ModifyColumnRequest
*/
public static ModifyColumnRequest buildModifyColumnRequest(final TableName tableName,
final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) {
@@ -935,7 +947,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer MoveRegionRequest nn * @return A MoveRegionRequest
+ * Create a protocol buffer MoveRegionRequest
+ * @return A MoveRegionRequest
*/
public static MoveRegionRequest buildMoveRegionRequest(byte[] encodedRegionName,
ServerName destServerName) {
@@ -976,7 +989,8 @@ public final class RequestConverter {
}
/**
- * Create a protocol buffer AssignRegionRequest n * @return an AssignRegionRequest
+ * Create a protocol buffer AssignRegionRequest
+ * @return an AssignRegionRequest
*/
public static AssignRegionRequest buildAssignRegionRequest(final byte[] regionName) {
AssignRegionRequest.Builder builder = AssignRegionRequest.newBuilder();
@@ -985,7 +999,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer UnassignRegionRequest n * @return an UnassignRegionRequest
+ * Creates a protocol buffer UnassignRegionRequest
+ * @return an UnassignRegionRequest
*/
public static UnassignRegionRequest buildUnassignRegionRequest(final byte[] regionName) {
UnassignRegionRequest.Builder builder = UnassignRegionRequest.newBuilder();
@@ -994,7 +1009,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer OfflineRegionRequest n * @return an OfflineRegionRequest
+ * Creates a protocol buffer OfflineRegionRequest
+ * @return an OfflineRegionRequest
*/
public static OfflineRegionRequest buildOfflineRegionRequest(final byte[] regionName) {
OfflineRegionRequest.Builder builder = OfflineRegionRequest.newBuilder();
@@ -1003,7 +1019,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer DeleteTableRequest n * @return a DeleteTableRequest
+ * Creates a protocol buffer DeleteTableRequest
+ * @return a DeleteTableRequest
*/
public static DeleteTableRequest buildDeleteTableRequest(final TableName tableName,
final long nonceGroup, final long nonce) {
@@ -1031,7 +1048,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer EnableTableRequest n * @return an EnableTableRequest
+ * Creates a protocol buffer EnableTableRequest
+ * @return an EnableTableRequest
*/
public static EnableTableRequest buildEnableTableRequest(final TableName tableName,
final long nonceGroup, final long nonce) {
@@ -1043,7 +1061,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer DisableTableRequest n * @return a DisableTableRequest
+ * Creates a protocol buffer DisableTableRequest
+ * @return a DisableTableRequest
*/
public static DisableTableRequest buildDisableTableRequest(final TableName tableName,
final long nonceGroup, final long nonce) {
@@ -1055,7 +1074,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer CreateTableRequest nn * @return a CreateTableRequest
+ * Creates a protocol buffer CreateTableRequest
+ * @return a CreateTableRequest
*/
public static CreateTableRequest buildCreateTableRequest(final TableDescriptor tableDescriptor,
final byte[][] splitKeys, final long nonceGroup, final long nonce) {
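A sketch of the table-creation path, assuming the standard TableDescriptorBuilder/ColumnFamilyDescriptorBuilder APIs and that a null splitKeys array is accepted for a single-region table (both assumptions, not verified by this diff):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;

    public class CreateTableRequestSketch {
      public static void main(String[] args) {
        TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .build();
        // splitKeys assumed nullable for a single-region table; nonce values are placeholders.
        CreateTableRequest req = RequestConverter.buildCreateTableRequest(desc, null, 0L, 0L);
        System.out.println(req.hasTableSchema()); // true
      }
    }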
@@ -1072,7 +1092,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer ModifyTableRequest nn * @return a ModifyTableRequest
+ * Creates a protocol buffer ModifyTableRequest
+ * @return a ModifyTableRequest
*/
public static ModifyTableRequest buildModifyTableRequest(final TableName tableName,
final TableDescriptor tableDesc, final long nonceGroup, final long nonce) {
@@ -1096,7 +1117,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer GetTableDescriptorsRequest n * @return a GetTableDescriptorsRequest
+ * Creates a protocol buffer GetTableDescriptorsRequest
+ * @return a GetTableDescriptorsRequest
*/
public static GetTableDescriptorsRequest
buildGetTableDescriptorsRequest(final List tableNames) {
@@ -1193,7 +1215,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer SetBalancerRunningRequest nn * @return a SetBalancerRunningRequest
+ * Creates a protocol buffer SetBalancerRunningRequest
+ * @return a SetBalancerRunningRequest
*/
public static SetBalancerRunningRequest buildSetBalancerRunningRequest(boolean on,
boolean synchronous) {
@@ -1278,8 +1301,8 @@ public final class RequestConverter {
}
/**
- * Creates a request for querying the master the last flushed sequence Id for a region n * @return
- * A {@link GetLastFlushedSequenceIdRequest}
+ * Creates a request for querying the master the last flushed sequence Id for a region
+ * @return A {@link GetLastFlushedSequenceIdRequest}
*/
public static GetLastFlushedSequenceIdRequest
buildGetLastFlushedSequenceIdRequest(byte[] regionName) {
@@ -1330,7 +1353,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer SetNormalizerRunningRequest n * @return a SetNormalizerRunningRequest
+ * Creates a protocol buffer SetNormalizerRunningRequest
+ * @return a SetNormalizerRunningRequest
*/
public static SetNormalizerRunningRequest buildSetNormalizerRunningRequest(boolean on) {
return SetNormalizerRunningRequest.newBuilder().setOn(on).build();
@@ -1438,7 +1462,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer CreateNamespaceRequest n * @return a CreateNamespaceRequest
+ * Creates a protocol buffer CreateNamespaceRequest
+ * @return a CreateNamespaceRequest
*/
public static CreateNamespaceRequest
buildCreateNamespaceRequest(final NamespaceDescriptor descriptor) {
@@ -1448,7 +1473,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer ModifyNamespaceRequest n * @return a ModifyNamespaceRequest
+ * Creates a protocol buffer ModifyNamespaceRequest
+ * @return a ModifyNamespaceRequest
*/
public static ModifyNamespaceRequest
buildModifyNamespaceRequest(final NamespaceDescriptor descriptor) {
@@ -1458,7 +1484,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer DeleteNamespaceRequest n * @return a DeleteNamespaceRequest
+ * Creates a protocol buffer DeleteNamespaceRequest
+ * @return a DeleteNamespaceRequest
*/
public static DeleteNamespaceRequest buildDeleteNamespaceRequest(final String name) {
DeleteNamespaceRequest.Builder builder = DeleteNamespaceRequest.newBuilder();
@@ -1467,8 +1494,8 @@ public final class RequestConverter {
}
/**
- * Creates a protocol buffer GetNamespaceDescriptorRequest n * @return a
- * GetNamespaceDescriptorRequest
+ * Creates a protocol buffer GetNamespaceDescriptorRequest
+ * @return a GetNamespaceDescriptorRequest
*/
public static GetNamespaceDescriptorRequest
buildGetNamespaceDescriptorRequest(final String name) {
@@ -1592,7 +1619,7 @@ public final class RequestConverter {
/**
* Creates IsSnapshotCleanupEnabledRequest to determine if auto snapshot cleanup based on TTL
- * expiration is turned on n
+ * expiration is turned on
*/
public static IsSnapshotCleanupEnabledRequest buildIsSnapshotCleanupEnabledRequest() {
return IsSnapshotCleanupEnabledRequest.newBuilder().build();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
index 440891382e7..09cbc460f22 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
@@ -90,7 +90,7 @@ public final class ResponseConverter {
* @param request the original protocol buffer MultiRequest
* @param response the protocol buffer MultiResponse to convert
* @param cells Cells to go with the passed in proto. Can be null.
- * @return the results that were in the MultiResponse (a Result or an Exception). n
+ * @return the results that were in the MultiResponse (a Result or an Exception).
*/
public static org.apache.hadoop.hbase.client.MultiResponse getResults(final MultiRequest request,
final MultiResponse response, final CellScanner cells) throws IOException {
@@ -103,7 +103,7 @@ public final class ResponseConverter {
* @param indexMap Used to support RowMutations/CheckAndMutate in batch
* @param response the protocol buffer MultiResponse to convert
* @param cells Cells to go with the passed in proto. Can be null.
- * @return the results that were in the MultiResponse (a Result or an Exception). n
+ * @return the results that were in the MultiResponse (a Result or an Exception).
*/
public static org.apache.hadoop.hbase.client.MultiResponse getResults(final MultiRequest request,
final Map indexMap, final MultiResponse response, final CellScanner cells)
@@ -247,7 +247,8 @@ public final class ResponseConverter {
}
/**
- * Wrap a throwable to an action result. n * @return an action result builder
+ * Wrap a throwable to an action result.
+ * @return an action result builder
*/
public static ResultOrException.Builder buildActionResult(final Throwable t) {
ResultOrException.Builder builder = ResultOrException.newBuilder();
@@ -256,7 +257,8 @@ public final class ResponseConverter {
}
/**
- * Wrap a throwable to an action result. n * @return an action result builder
+ * Wrap a throwable to an action result.
+ * @return an action result builder
*/
public static ResultOrException.Builder buildActionResult(final ClientProtos.Result r) {
ResultOrException.Builder builder = ResultOrException.newBuilder();
@@ -264,9 +266,7 @@ public final class ResponseConverter {
return builder;
}
- /**
- * n * @return NameValuePair of the exception name to stringified version os exception.
- */
+ /** Returns NameValuePair of the exception name to stringified version of the exception. */
public static NameBytesPair buildException(final Throwable t) {
NameBytesPair.Builder parameterBuilder = NameBytesPair.newBuilder();
parameterBuilder.setName(t.getClass().getName());
@@ -307,7 +307,8 @@ public final class ResponseConverter {
}
/**
- * A utility to build a GetServerInfoResponse. nn * @return the response
+ * A utility to build a GetServerInfoResponse.
+ * @return the response
*/
public static GetServerInfoResponse buildGetServerInfoResponse(final ServerName serverName,
final int webuiPort) {
@@ -322,7 +323,8 @@ public final class ResponseConverter {
}
/**
- * A utility to build a GetOnlineRegionResponse. n * @return the response
+ * A utility to build a GetOnlineRegionResponse.
+ * @return the response
*/
public static GetOnlineRegionResponse
buildGetOnlineRegionResponse(final List regions) {
@@ -405,7 +407,7 @@ public final class ResponseConverter {
}
/**
- * Create Results from the cells using the cells meta data. nnn
+ * Create Results from the cells using the cells meta data.
*/
public static Result[] getResults(CellScanner cellScanner, ScanResponse response)
throws IOException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java
index 091515c325e..2787b5ab7f9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java
@@ -91,7 +91,7 @@ public class Writables {
* @return The passed Writable after its readFields has been called fed by the passed
*         bytes array or IllegalArgumentException if passed null or an empty bytes array.
- * @throws IOException e n
+ * @throws IOException e
*/
public static Writable getWritable(final byte[] bytes, final Writable w) throws IOException {
return getWritable(bytes, 0, bytes.length, w);
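A small round-trip sketch for these helpers; Writables.getBytes is assumed to be the serializing counterpart of getWritable (it is not shown in this hunk):

    import java.io.IOException;
    import org.apache.hadoop.hbase.util.Writables;
    import org.apache.hadoop.io.Text;

    public class WritablesRoundTrip {
      public static void main(String[] args) throws IOException {
        Text original = new Text("hello");
        byte[] bytes = Writables.getBytes(original);                 // assumed counterpart
        Text copy = (Text) Writables.getWritable(bytes, new Text()); // fills and returns copy
        System.out.println(copy); // hello
      }
    }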
@@ -107,7 +107,7 @@ public class Writables {
* @return The passed Writable after its readFields has been called fed by the passed
*         bytes array or IllegalArgumentException if passed null or an empty bytes array.
- * @throws IOException e n
+ * @throws IOException e
*/
public static Writable getWritable(final byte[] bytes, final int offset, final int length,
final Writable w) throws IOException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
index 97c8302b221..8d3fcd2c342 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
@@ -144,7 +144,7 @@ public class ZNodePaths {
/**
* Parses the meta replicaId from the passed path.
- * @param path the name of the full path which includes baseZNode. n
+ * @param path the name of the full path which includes baseZNode.
*/
public int getMetaReplicaIdFromPath(String path) {
// Extract the znode from path. The prefix is of the following format.
@@ -155,7 +155,7 @@ public class ZNodePaths {
/**
* Parse the meta replicaId from the passed znode
- * @param znode the name of the znode, does not include baseZNode n
+ * @param znode the name of the znode, does not include baseZNode
*/
public int getMetaReplicaIdFromZNode(String znode) {
return znode.equals(metaZNodePrefix)
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java
index cc329cd3d03..cce3ba4e4e3 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java
@@ -42,7 +42,7 @@ public class TestDeleteTimeStamp {
private static final byte[] QUALIFIER = Bytes.toBytes("testQualifier");
/*
- * Test for verifying that the timestamp in delete object is being honored. n
+ * Test for verifying that the timestamp in delete object is being honored.
*/
@Test
public void testTimeStamp() {
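For readers skimming the test, a sketch of the behaviour it verifies: a timestamp passed to Delete#addColumn should survive into the queued Cells (the row and family names here are hypothetical stand-ins for the test's constants):

    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DeleteTimestampSketch {
      public static void main(String[] args) {
        long ts = 1234L;
        Delete d = new Delete(Bytes.toBytes("testRow"))
          .addColumn(Bytes.toBytes("testFamily"), Bytes.toBytes("testQualifier"), ts);
        // Every queued Cell should carry ts rather than LATEST_TIMESTAMP.
        d.getFamilyCellMap().values().forEach(cells ->
          cells.forEach(cell -> System.out.println(cell.getTimestamp() == ts)));
      }
    }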
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
index 27cf51e7c9f..d7eef52a4f9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
@@ -100,7 +100,8 @@ public final class AuthUtil {
/**
* For kerberized cluster, return login user (from kinit or from keytab if specified). For
* non-kerberized cluster, return system user.
- * @param conf configuartion file n * @throws IOException login exception
+ * @param conf configuration file
+ * @throws IOException login exception
*/
@InterfaceAudience.Private
public static User loginClient(Configuration conf) throws IOException {
@@ -160,7 +161,8 @@ public final class AuthUtil {
*
* NOT recommend to use to method unless you're sure what you're doing, it is for canary only.
* Please use User#loginClient.
- * @param conf configuration file n * @throws IOException login exception
+ * @param conf configuration file
+ * @throws IOException login exception
*/
private static User loginClientAsService(Configuration conf) throws IOException {
UserProvider provider = UserProvider.instantiate(conf);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java
index e5050b864ca..a29a98a8c09 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java
@@ -54,7 +54,7 @@ public class ByteBufferKeyOnlyKeyValue extends ByteBufferExtendedCell {
/**
* A setter that helps to avoid object creation every time and whenever there is a need to create
- * new OffheapKeyOnlyKeyValue. nnn
+ * new OffheapKeyOnlyKeyValue.
*/
public void setKey(ByteBuffer key, int offset, int length) {
setKey(key, offset, length, ByteBufferUtils.toShort(key, offset));
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java
index 28128ee37c6..677ed2295ce 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java
@@ -48,7 +48,7 @@ public interface CellBuilder {
Cell build();
/**
- * Remove all internal elements from builder. n
+ * Remove all internal elements from builder.
*/
CellBuilder clear();
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
index b4d3b5549db..2c19c0f1043 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
@@ -661,9 +661,8 @@ public class CellComparatorImpl implements CellComparator {
/**
* Compares the row part of the cell with a simple plain byte[] like the stopRow in Scan. This
* should be used with context where for hbase:meta cells the
- * {{@link MetaCellComparator#META_COMPARATOR} should be used n * the cell to be compared n * the
- * kv serialized byte[] to be compared with n * the offset in the byte[] n * the length in the
- * byte[]
+ * {@link MetaCellComparator#META_COMPARATOR} should be used. Compares the cell against the kv
+ * serialized byte[] at the given offset and length in the byte[].
* @return 0 if both cell and the byte[] are equal, 1 if the cell is bigger than byte[], -1
* otherwise
*/
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
index c28d0d87525..80dcf8c505d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
@@ -77,7 +77,8 @@ public final class CellUtil {
/**
* Makes a column in family:qualifier form from separate byte arrays.
*
- * Not recommended for usage as this is old-style API. nn * @return family:qualifier
+ * Not recommended for usage as this is old-style API.
+ * @return family:qualifier
*/
public static byte[] makeColumn(byte[] family, byte[] qualifier) {
return Bytes.add(family, COLUMN_FAMILY_DELIM_ARRAY, qualifier);
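Trivial usage sketch for the old-style helper above:

    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MakeColumnSketch {
      public static void main(String[] args) {
        byte[] column = CellUtil.makeColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
        System.out.println(Bytes.toString(column)); // cf:q
      }
    }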
@@ -292,9 +293,7 @@ public final class CellUtil {
return destinationOffset + vlen;
}
- /**
- * n * @return CellScanner interface over cellIterables
- */
+ /** Returns CellScanner interface over cellIterables */
public static CellScanner
createCellScanner(final List<? extends CellScannable> cellScannerables) {
return new CellScanner() {
@@ -320,17 +319,15 @@ public final class CellUtil {
};
}
- /**
- * n * @return CellScanner interface over cellIterable
- */
+ /** Returns CellScanner interface over cellIterable */
public static CellScanner createCellScanner(final Iterable cellIterable) {
if (cellIterable == null) return null;
return createCellScanner(cellIterable.iterator());
}
/**
- * n * @return CellScanner interface over cellIterable or null if cells
- * is null
+ * Returns CellScanner interface over cellIterable or null if cells is
+ * null
*/
public static CellScanner createCellScanner(final Iterator cells) {
if (cells == null) return null;
@@ -352,9 +349,7 @@ public final class CellUtil {
};
}
- /**
- * n * @return CellScanner interface over cellArray
- */
+ /** Returns CellScanner interface over cellArray */
public static CellScanner createCellScanner(final Cell[] cellArray) {
return new CellScanner() {
private final Cell[] cells = cellArray;
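A sketch of consuming one of these CellScanner views, assuming the simple KeyValue constructor for the sample cells (advance() must be called before current()):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellScanner;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CellScannerSketch {
      public static void main(String[] args) throws Exception {
        Cell[] cells = new Cell[] {
          new KeyValue(Bytes.toBytes("r1"), Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v1")),
          new KeyValue(Bytes.toBytes("r2"), Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v2"))
        };
        CellScanner scanner = CellUtil.createCellScanner(cells);
        while (scanner.advance()) {
          System.out.println(Bytes.toString(CellUtil.cloneRow(scanner.current())));
        }
      }
    }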
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java
index ddbf71cac13..432556d2642 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java
@@ -145,7 +145,7 @@ public class CompoundConfiguration extends Configuration {
/**
* Add Bytes map to config list. This map is generally created by HTableDescriptor or
* HColumnDescriptor, but can be abstractly used. The added configuration overrides the previous
- * ones if there are name collisions. n * Bytes map
+ * ones if there are name collisions. The map argument is the Bytes map to add.
* @return this, for builder pattern
*/
public CompoundConfiguration addBytesMap(final Map map) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
index b3b7a1c5e57..28e648ec466 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
@@ -41,7 +41,7 @@ public interface ExtendedCell extends RawCell, HeapSize {
* <tags>
* @param out Stream to which cell has to be written
* @param withTags Whether to write tags.
- * @return how many bytes are written. n
+ * @return how many bytes are written.
*/
// TODO remove the boolean param once HBASE-16706 is done.
default int write(OutputStream out, boolean withTags) throws IOException {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
index 5d428d0b434..5fc030581da 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
@@ -182,7 +182,7 @@ public class HBaseConfiguration extends Configuration {
* @param conf configuration instance for accessing the passwords
* @param alias the name of the password element
* @param defPass the default password
- * @return String password or default password n
+ * @return String password or default password
*/
public static String getPassword(Configuration conf, String alias, String defPass)
throws IOException {
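A hedged sketch of the credential lookup described above; the alias string is hypothetical, and the default is returned when no credential provider resolves it:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class PasswordLookupSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        String password = HBaseConfiguration.getPassword(conf, "my.service.password", "default-secret");
        System.out.println(password); // "default-secret" unless a provider supplies the alias
      }
    }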
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index 59c54b0c031..3661c063e88 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -1840,8 +1840,8 @@ public class KeyValue implements ExtendedCell, Cloneable {
* Compare columnFamily, qualifier, timestamp, and key type (everything except the row). This
* method is used both in the normal comparator and the "same-prefix" comparator. Note that we
* are assuming that row portions of both KVs have already been parsed and found identical, and
- * we don't validate that assumption here. n * the length of the common prefix of the two
- * key-values being compared, including row length and row
+ * we don't validate that assumption here. commonPrefix is the length of the common prefix of
+ * the two key-values being compared, including row length and row.
*/
private int compareWithoutRow(int commonPrefix, byte[] left, int loffset, int llength,
byte[] right, int roffset, int rlength, short rowlength) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java
index 4291d904fe8..ed3687e9ed4 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java
@@ -55,8 +55,8 @@ public class KeyValueTestUtil {
/**
* Checks whether KeyValues from kvCollection2 are contained in kvCollection1. The comparison is
- * made without distinguishing MVCC version of the KeyValues nn * @return true if KeyValues from
- * kvCollection2 are contained in kvCollection1
+ * made without distinguishing MVCC version of the KeyValues
+ * @return true if KeyValues from kvCollection2 are contained in kvCollection1
*/
public static boolean containsIgnoreMvccVersion(Collection<? extends Cell> kvCollection1,
Collection<? extends Cell> kvCollection2) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
index bdf77d511af..71f1da9a8a6 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
@@ -58,8 +58,8 @@ public class KeyValueUtil {
/**
* Returns number of bytes this cell's key part would have been used if serialized as in
- * {@link KeyValue}. Key includes rowkey, family, qualifier, timestamp and type. n * @return the
- * key length
+ * {@link KeyValue}. Key includes rowkey, family, qualifier, timestamp and type.
+ * @return the key length
*/
public static int keyLength(final Cell cell) {
return keyLength(cell.getRowLength(), cell.getFamilyLength(), cell.getQualifierLength());
@@ -96,8 +96,8 @@ public class KeyValueUtil {
}
/**
- * The position will be set to the beginning of the new ByteBuffer n * @return the Bytebuffer
- * containing the key part of the cell
+ * The position will be set to the beginning of the new ByteBuffer
+ * @return the Bytebuffer containing the key part of the cell
*/
public static ByteBuffer copyKeyToNewByteBuffer(final Cell cell) {
byte[] bytes = new byte[keyLength(cell)];
@@ -107,8 +107,8 @@ public class KeyValueUtil {
}
/**
- * Copies the key to a new KeyValue n * @return the KeyValue that consists only the key part of
- * the incoming cell
+ * Copies the key to a new KeyValue
+ * @return the KeyValue that consists only the key part of the incoming cell
*/
public static KeyValue toNewKeyCell(final Cell cell) {
byte[] bytes = new byte[keyLength(cell)];
@@ -203,7 +203,7 @@ public class KeyValueUtil {
/**
* Creates a new KeyValue object positioned in the supplied ByteBuffer and sets the ByteBuffer's
- * position to the start of the next KeyValue. Does not allocate a new array or copy data. nnn
+ * position to the start of the next KeyValue. Does not allocate a new array or copy data.
*/
public static KeyValue nextShallowCopy(final ByteBuffer bb, final boolean includesMvccVersion,
boolean includesTags) {
@@ -236,7 +236,8 @@ public class KeyValueUtil {
/**
* Decrement the timestamp. For tests (currently wasteful) Remember timestamps are sorted reverse
- * chronologically. n * @return previous key
+ * chronologically.
+ * @return previous key
*/
public static KeyValue previousKey(final KeyValue in) {
return createFirstOnRow(CellUtil.cloneRow(in), CellUtil.cloneFamily(in),
@@ -246,9 +247,8 @@ public class KeyValueUtil {
/**
* Create a KeyValue for the specified row, family and qualifier that would be larger than or
* equal to all other possible KeyValues that have the same row, family, qualifier. Used for
- * reseeking. Should NEVER be returned to a client. n * row key n * row offset n * row length n *
- * family name n * family offset n * family length n * column qualifier n * qualifier offset n *
- * qualifier length
+ * reseeking. Should NEVER be returned to a client. Takes the row key, family name and column
+ * qualifier, each given as a byte[] with an offset and length.
* @return Last possible key on passed row, family, qualifier.
*/
public static KeyValue createLastOnRow(final byte[] row, final int roffset, final int rlength,
@@ -408,11 +408,11 @@ public class KeyValueUtil {
/*************** misc **********************************/
/**
- * n * @return cell if it is an object of class {@link KeyValue} else we will return
- * a new {@link KeyValue} instance made from cell Note: Even if the cell is an object
- * of any of the subclass of {@link KeyValue}, we will create a new {@link KeyValue} object
- * wrapping same buffer. This API is used only with MR based tools which expect the type to be
- * exactly KeyValue. That is the reason for doing this way.
+ * @return cell if it is an object of class {@link KeyValue} else we will return a
+ * new {@link KeyValue} instance made from cell Note: Even if the cell is an
+ * object of any of the subclass of {@link KeyValue}, we will create a new
+ * {@link KeyValue} object wrapping same buffer. This API is used only with MR based tools
+ * which expect the type to be exactly KeyValue. That is the reason for doing this way.
* @deprecated without any replacement.
*/
@Deprecated
@@ -444,8 +444,9 @@ public class KeyValueUtil {
}
/**
- * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable. nn
- * * @return Length written on stream n * @see #create(DataInput) for the inverse function
+ * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable.
+ * @return Length written on stream
+ * @see #create(DataInput) for the inverse function
*/
public static long write(final KeyValue kv, final DataOutput out) throws IOException {
// This is how the old Writables write used to serialize KVs. Need to figure
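A round-trip sketch of the Writable-style framing these two methods implement: write the length-prefixed KeyValue, then read it back with create (a minimal sketch, not part of this change):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.KeyValueUtil;
    import org.apache.hadoop.hbase.util.Bytes;

    public class KeyValueWritableRoundTrip {
      public static void main(String[] args) throws Exception {
        KeyValue kv = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"),
          Bytes.toBytes("q"), Bytes.toBytes("value"));
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        KeyValueUtil.write(kv, new DataOutputStream(bos));            // old Writable-style framing
        KeyValue copy = KeyValueUtil.create(
          new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
        System.out.println(copy); // same key and value as kv
      }
    }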
@@ -639,7 +640,7 @@ public class KeyValueUtil {
* @param in inputStream to read.
* @param withTags whether the keyvalue should include tags are not
* @return Created KeyValue OR if we find a length of zero, we will return null which can be
- * useful marking a stream as done. n
+ * useful marking a stream as done.
*/
public static KeyValue createKeyValueFromInputStream(InputStream in, boolean withTags)
throws IOException {
@@ -663,24 +664,24 @@ public class KeyValueUtil {
}
/**
- * n * @return A KeyValue made of a byte array that holds the key-only part. Needed to convert
- * hfile index members to KeyValues.
+ * Returns a KeyValue made of a byte array that holds the key-only part. Needed to convert hfile
+ * index members to KeyValues.
*/
public static KeyValue createKeyValueFromKey(final byte[] b) {
return createKeyValueFromKey(b, 0, b.length);
}
/**
- * n * @return A KeyValue made of a byte buffer that holds the key-only part. Needed to convert
- * hfile index members to KeyValues.
+ * Return a KeyValue made of a byte buffer that holds the key-only part. Needed to convert hfile
+ * index members to KeyValues.
*/
public static KeyValue createKeyValueFromKey(final ByteBuffer bb) {
return createKeyValueFromKey(bb.array(), bb.arrayOffset(), bb.limit());
}
/**
- * nnn * @return A KeyValue made of a byte array that holds the key-only part. Needed to convert
- * hfile index members to KeyValues.
+ * Return a KeyValue made of a byte array that holds the key-only part. Needed to convert hfile
+ * index members to KeyValues.
*/
public static KeyValue createKeyValueFromKey(final byte[] b, final int o, final int l) {
byte[] newb = new byte[l + KeyValue.ROW_OFFSET];
@@ -691,19 +692,19 @@ public class KeyValueUtil {
}
/**
- * n * Where to read bytes from. Creates a byte array to hold the KeyValue backing bytes copied
- * from the steam.
+ * Where to read bytes from. Creates a byte array to hold the KeyValue backing bytes copied from
+ * the stream.
* @return KeyValue created by deserializing from in OR if we find a length of zero,
- * we will return null which can be useful marking a stream as done. n
+ * we will return null which can be useful marking a stream as done.
*/
public static KeyValue create(final DataInput in) throws IOException {
return create(in.readInt(), in);
}
/**
- * Create a KeyValue reading length from in nn * @return Created
- * KeyValue OR if we find a length of zero, we will return null which can be useful marking a
- * stream as done. n
+ * Create a KeyValue reading length from in
+ * @return Created KeyValue OR if we find a length of zero, we will return null which can be
+ * useful marking a stream as done.
*/
public static KeyValue create(int length, final DataInput in) throws IOException {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
index 1b035966da2..58c4b2d1cf1 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
@@ -1046,7 +1046,7 @@ public final class PrivateCellUtil {
* Writes the row from the given cell to the output stream excluding the common prefix
* @param out The dataoutputstream to which the data has to be written
* @param cell The cell whose contents has to be written
- * @param rlength the row length n
+ * @param rlength the row length
*/
public static void writeRowSkippingBytes(DataOutputStream out, Cell cell, short rlength,
int commonPrefix) throws IOException {
@@ -1234,7 +1234,6 @@ public final class PrivateCellUtil {
/**
* Compares only the key portion of a cell. It does not include the sequence id/mvcc of the cell
- * nn
* @return an int greater than 0 if left > than right lesser than 0 if left < than right
* equal to 0 if left is equal to right
*/
@@ -2195,7 +2194,7 @@ public final class PrivateCellUtil {
/**
* Writes the Cell's key part as it would have serialized in a KeyValue. The format is <2 bytes
* rk len><rk><1 byte cf len><cf><qualifier><8 bytes
- * timestamp><1 byte type> nnn
+ * timestamp><1 byte type>
*/
public static void writeFlatKey(Cell cell, DataOutput out) throws IOException {
short rowLen = cell.getRowLength();
@@ -2227,7 +2226,7 @@ public final class PrivateCellUtil {
/**
* Deep clones the given cell if the cell supports deep cloning
* @param cell the cell to be cloned
- * @return the cloned cell n
+ * @return the cloned cell
*/
public static Cell deepClone(Cell cell) throws CloneNotSupportedException {
if (cell instanceof ExtendedCell) {
@@ -2241,7 +2240,7 @@ public final class PrivateCellUtil {
* @param cell the cell to be written
* @param out the outputstream
* @param withTags if tags are to be written or not
- * @return the total bytes written n
+ * @return the total bytes written
*/
public static int writeCell(Cell cell, OutputStream out, boolean withTags) throws IOException {
if (cell instanceof ExtendedCell) {
@@ -2316,8 +2315,8 @@ public final class PrivateCellUtil {
/**
* Sets the given seqId to the cell. Marked as audience Private as of 1.2.0. Setting a Cell
- * sequenceid is an internal implementation detail not for general public use. nn * @throws
- * IOException when the passed cell is not of type {@link ExtendedCell}
+ * sequenceid is an internal implementation detail not for general public use.
+ * @throws IOException when the passed cell is not of type {@link ExtendedCell}
*/
public static void setSequenceId(Cell cell, long seqId) throws IOException {
if (cell instanceof ExtendedCell) {
@@ -2329,8 +2328,8 @@ public final class PrivateCellUtil {
}
/**
- * Sets the given timestamp to the cell. nn * @throws IOException when the passed cell is not of
- * type {@link ExtendedCell}
+ * Sets the given timestamp to the cell.
+ * @throws IOException when the passed cell is not of type {@link ExtendedCell}
*/
public static void setTimestamp(Cell cell, long ts) throws IOException {
if (cell instanceof ExtendedCell) {
@@ -2386,7 +2385,7 @@ public final class PrivateCellUtil {
* Writes the row from the given cell to the output stream
* @param out The outputstream to which the data has to be written
* @param cell The cell whose contents has to be written
- * @param rlength the row length n
+ * @param rlength the row length
*/
public static void writeRow(OutputStream out, Cell cell, short rlength) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2401,7 +2400,7 @@ public final class PrivateCellUtil {
* Writes the family from the given cell to the output stream
* @param out The outputstream to which the data has to be written
* @param cell The cell whose contents has to be written
- * @param flength the family length n
+ * @param flength the family length
*/
public static void writeFamily(OutputStream out, Cell cell, byte flength) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2416,7 +2415,7 @@ public final class PrivateCellUtil {
* Writes the qualifier from the given cell to the output stream
* @param out The outputstream to which the data has to be written
* @param cell The cell whose contents has to be written
- * @param qlength the qualifier length n
+ * @param qlength the qualifier length
*/
public static void writeQualifier(OutputStream out, Cell cell, int qlength) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2432,7 +2431,7 @@ public final class PrivateCellUtil {
* Writes the qualifier from the given cell to the output stream excluding the common prefix
* @param out The dataoutputstream to which the data has to be written
* @param cell The cell whose contents has to be written
- * @param qlength the qualifier length n
+ * @param qlength the qualifier length
*/
public static void writeQualifierSkippingBytes(DataOutputStream out, Cell cell, int qlength,
int commonPrefix) throws IOException {
@@ -2451,7 +2450,7 @@ public final class PrivateCellUtil {
* Writes the value from the given cell to the output stream
* @param out The outputstream to which the data has to be written
* @param cell The cell whose contents has to be written
- * @param vlength the value length n
+ * @param vlength the value length
*/
public static void writeValue(OutputStream out, Cell cell, int vlength) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2466,7 +2465,7 @@ public final class PrivateCellUtil {
* Writes the tag from the given cell to the output stream
* @param out The outputstream to which the data has to be written
* @param cell The cell whose contents has to be written
- * @param tagsLength the tag length n
+ * @param tagsLength the tag length
*/
public static void writeTags(OutputStream out, Cell cell, int tagsLength) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2499,7 +2498,8 @@ public final class PrivateCellUtil {
}
/**
- * Converts the rowkey bytes of the given cell into an int value n * @return rowkey as int
+ * Converts the rowkey bytes of the given cell into an int value
+ * @return rowkey as int
*/
public static int getRowAsInt(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2510,7 +2510,8 @@ public final class PrivateCellUtil {
}
/**
- * Converts the value bytes of the given cell into a long value n * @return value as long
+ * Converts the value bytes of the given cell into a long value
+ * @return value as long
*/
public static long getValueAsLong(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2521,7 +2522,8 @@ public final class PrivateCellUtil {
}
/**
- * Converts the value bytes of the given cell into a int value n * @return value as int
+ * Converts the value bytes of the given cell into an int value
+ * @return value as int
*/
public static int getValueAsInt(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2532,7 +2534,8 @@ public final class PrivateCellUtil {
}
/**
- * Converts the value bytes of the given cell into a double value n * @return value as double
+ * Converts the value bytes of the given cell into a double value
+ * @return value as double
*/
public static double getValueAsDouble(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2543,7 +2546,8 @@ public final class PrivateCellUtil {
}
/**
- * Converts the value bytes of the given cell into a BigDecimal n * @return value as BigDecimal
+ * Converts the value bytes of the given cell into a BigDecimal
+ * @return value as BigDecimal
*/
public static BigDecimal getValueAsBigDecimal(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
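A sketch of the value-conversion helpers touched in this hunk; note PrivateCellUtil is an internal (IA.Private) class, so this is illustration only:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.PrivateCellUtil;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ValueConversionSketch {
      public static void main(String[] args) {
        Cell cell = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"),
          Bytes.toBytes("q"), Bytes.toBytes(42L)); // value is a serialized long
        System.out.println(PrivateCellUtil.getValueAsLong(cell)); // 42
      }
    }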
@@ -2764,8 +2768,9 @@ public final class PrivateCellUtil {
/**
* Estimate based on keyvalue's serialization format in the RPC layer. Note that there is an extra
* SIZEOF_INT added to the size here that indicates the actual length of the cell for cases where
- * cell's are serialized in a contiguous format (For eg in RPCs). n * @return Estimate of the
- * cell size in bytes plus an extra SIZEOF_INT indicating the actual cell length.
+ * cells are serialized in a contiguous format (for example in RPCs).
+ * @return Estimate of the cell size in bytes plus an extra SIZEOF_INT indicating the
+ * actual cell length.
*/
public static int estimatedSerializedSizeOf(final Cell cell) {
return cell.getSerializedSize() + Bytes.SIZEOF_INT;
@@ -2785,9 +2790,9 @@ public final class PrivateCellUtil {
/**
* This method exists just to encapsulate how we serialize keys. To be replaced by a factory that
* we query to figure what the Cell implementation is and then, what serialization engine to use
- * and further, how to serialize the key for inclusion in hfile index. TODO. n * @return The key
- * portion of the Cell serialized in the old-school KeyValue way or null if passed a null
- * cell
+ * and further, how to serialize the key for inclusion in hfile index. TODO.
+ * @return The key portion of the Cell serialized in the old-school KeyValue way or null if passed
+ * a null cell
*/
public static byte[] getCellKeySerializedAsKeyValueKey(final Cell cell) {
if (cell == null) return null;
@@ -2797,8 +2802,8 @@ public final class PrivateCellUtil {
}
/**
- * Create a Cell that is smaller than all other possible Cells for the given Cell's row. n
- * * @return First possible Cell on passed Cell's row.
+ * Create a Cell that is smaller than all other possible Cells for the given Cell's row.
+ * @return First possible Cell on passed Cell's row.
*/
public static Cell createFirstOnRow(final Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2862,8 +2867,8 @@ public final class PrivateCellUtil {
/**
* Create a Cell that is smaller than all other possible Cells for the given Cell's rk:cf and
- * passed qualifier. nnnn * @return Last possible Cell on passed Cell's rk:cf and passed
- * qualifier.
+ * passed qualifier.
+ * @return Last possible Cell on passed Cell's rk:cf and passed qualifier.
*/
public static Cell createFirstOnRowCol(final Cell cell, byte[] qArray, int qoffest, int qlength) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2883,7 +2888,7 @@ public final class PrivateCellUtil {
* Creates the first cell with the row/family/qualifier of this cell and the given timestamp. Uses
* the "maximum" type that guarantees that the new cell is the lowest possible for this
* combination of row, family, qualifier, and timestamp. This cell's own timestamp is ignored.
- * @param cell - cell n
+ * @param cell - cell
*/
public static Cell createFirstOnRowColTS(Cell cell, long ts) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2901,8 +2906,8 @@ public final class PrivateCellUtil {
}
/**
- * Create a Cell that is larger than all other possible Cells for the given Cell's row. n
- * * @return Last possible Cell on passed Cell's row.
+ * Create a Cell that is larger than all other possible Cells for the given Cell's row.
+ * @return Last possible Cell on passed Cell's row.
*/
public static Cell createLastOnRow(final Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2919,7 +2924,8 @@ public final class PrivateCellUtil {
/**
* Create a Cell that is larger than all other possible Cells for the given Cell's rk:cf:q. Used
* in creating "fake keys" for the multi-column Bloom filter optimization to skip the row/column
- * we already know is not in the file. n * @return Last possible Cell on passed Cell's rk:cf:q.
+ * we already know is not in the file.
+ * @return Last possible Cell on passed Cell's rk:cf:q.
*/
public static Cell createLastOnRowCol(final Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java
index be8e4e769ba..9a2a29356b1 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java
@@ -95,7 +95,7 @@ public abstract class BaseDecoder implements Codec.Decoder {
/**
* Extract a Cell.
* @return a parsed Cell or throws an Exception. EOFException or a generic IOException maybe
- * thrown if EOF is reached prematurely. Does not return null. n
+ * thrown if EOF is reached prematurely. Does not return null.
*/
@NonNull
protected abstract Cell parseCell() throws IOException;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java
index e7facdbfbf2..f4552c03826 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java
@@ -62,7 +62,7 @@ public class CellCodec implements Codec {
}
/**
- * Write int length followed by array bytes. nnnn
+ * Write int length followed by array bytes.
*/
private void write(final byte[] bytes, final int offset, final int length) throws IOException {
// TODO add BB backed os check and do for write. Pass Cell
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java
index 75e3d48d9fa..07bfb53d5df 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java
@@ -65,7 +65,7 @@ public class CellCodecWithTags implements Codec {
}
/**
- * Write int length followed by array bytes. nnnn
+ * Write int length followed by array bytes.
*/
private void write(final byte[] bytes, final int offset, final int length) throws IOException {
this.out.write(Bytes.toBytes(length));
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java
index 86a2fefae7a..2b21546a72a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java
@@ -82,7 +82,7 @@ public class ByteBufferOutputStream extends OutputStream implements ByteBufferWr
}
/**
- * This flips the underlying BB so be sure to use it _last_! n
+ * This flips the underlying BB so be sure to use it _last_!
*/
public ByteBuffer getByteBuffer() {
curBuf.flip();
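A sketch of why the warning matters: getByteBuffer() flips the underlying buffer, so it has to be the last call (the int-capacity constructor is an assumption):

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import org.apache.hadoop.hbase.io.ByteBufferOutputStream;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ByteBufferOutputStreamSketch {
      public static void main(String[] args) throws IOException {
        ByteBufferOutputStream out = new ByteBufferOutputStream(64); // initial capacity
        out.write(Bytes.toBytes("payload"));
        ByteBuffer bb = out.getByteBuffer(); // flips the buffer; write nothing after this
        System.out.println(bb.remaining());  // 7
      }
    }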
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
index 1613bd563d0..d1310137e8c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
@@ -38,14 +38,14 @@ public interface CellOutputStream {
* Implementation must copy the entire state of the Cell. If the written Cell is modified
* immediately after the write method returns, the modifications must have absolutely no effect on
* the copy of the Cell that was added in the write.
- * @param cell Cell to write out n
+ * @param cell Cell to write out
*/
void write(Cell cell) throws IOException;
/**
* Let the implementation decide what to do. Usually means writing accumulated data into a byte[]
* that can then be read from the implementation to be sent to disk, put in the block cache, or
- * sent over the network. n
+ * sent over the network.
*/
void flush() throws IOException;
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
index 593802bf3b6..08942426f87 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
@@ -154,8 +154,9 @@ public final class Encryption {
* The encryptor's state will be finalized. It should be reinitialized or returned to the pool.
* @param out ciphertext
- * @param src plaintext nnnn
+ * @param src plaintext
*/
public static void encrypt(OutputStream out, byte[] src, int offset, int length, Encryptor e)
throws IOException {
@@ -333,7 +333,7 @@ public final class Encryption {
/**
* Encrypt a block of plaintext
* @param out ciphertext
- * @param src plaintext nnnnn
+ * @param src plaintext
*/
public static void encrypt(OutputStream out, byte[] src, int offset, int length, Context context,
byte[] iv) throws IOException {
@@ -349,7 +349,7 @@ public final class Encryption {
*
* The encryptor's state will be finalized. It should be reinitialized or returned to the pool.
* @param out ciphertext
- * @param in plaintext nn
+ * @param in plaintext
*/
public static void encrypt(OutputStream out, InputStream in, Encryptor e) throws IOException {
OutputStream cout = e.createEncryptionStream(out);
@@ -363,7 +363,7 @@ public final class Encryption {
/**
* Encrypt a stream of plaintext given a context and IV
* @param out ciphertext
- * @param in plaintet nnn
+ * @param in plaintext
*/
public static void encrypt(OutputStream out, InputStream in, Context context, byte[] iv)
throws IOException {
@@ -378,7 +378,6 @@ public final class Encryption {
* Decrypt a block of ciphertext read in from a stream with the given cipher and context
*
* The decryptor's state will be finalized. It should be reinitialized or returned to the pool.
- * nnnnnn
*/
public static void decrypt(byte[] dest, int destOffset, InputStream in, int destSize, Decryptor d)
throws IOException {
@@ -391,7 +390,7 @@ public final class Encryption {
}
/**
- * Decrypt a block of ciphertext from a stream given a context and IV nnnnnnn
+ * Decrypt a block of ciphertext from a stream given a context and IV
*/
public static void decrypt(byte[] dest, int destOffset, InputStream in, int destSize,
Context context, byte[] iv) throws IOException {
@@ -402,7 +401,7 @@ public final class Encryption {
}
/**
- * Decrypt a stream of ciphertext given a decryptor nnnnn
+ * Decrypt a stream of ciphertext given a decryptor
*/
public static void decrypt(OutputStream out, InputStream in, int outLen, Decryptor d)
throws IOException {
@@ -425,7 +424,7 @@ public final class Encryption {
}
/**
- * Decrypt a stream of ciphertext given a context and IV nnnnnn
+ * Decrypt a stream of ciphertext given a context and IV
*/
public static void decrypt(OutputStream out, InputStream in, int outLen, Context context,
byte[] iv) throws IOException {
@@ -436,7 +435,8 @@ public final class Encryption {
}
/**
- * Resolves a key for the given subject nn * @return a key for the given subject
+ * Resolves a key for the given subject
+ * @return a key for the given subject
* @throws IOException if the key is not found
*/
public static Key getSecretKeyForSubject(String subject, Configuration conf) throws IOException {
@@ -460,7 +460,7 @@ public final class Encryption {
* @param in plaintext
* @param conf configuration
* @param cipher the encryption algorithm
- * @param iv the initialization vector, can be null n
+ * @param iv the initialization vector, can be null
*/
public static void encryptWithSubjectKey(OutputStream out, InputStream in, String subject,
Configuration conf, Cipher cipher, byte[] iv) throws IOException {
@@ -482,7 +482,7 @@ public final class Encryption {
* @param subject the subject's key alias
* @param conf configuration
* @param cipher the encryption algorithm
- * @param iv the initialization vector, can be null n
+ * @param iv the initialization vector, can be null
*/
public static void decryptWithSubjectKey(OutputStream out, InputStream in, int outLen,
String subject, Configuration conf, Cipher cipher, byte[] iv) throws IOException {
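A minimal usage sketch of the byte[] encrypt/decrypt overloads documented above (illustrative only, not part of this patch; it assumes an already configured Encryptor e and Decryptor d, which the patch does not show):

    // Requires: java.io.ByteArrayInputStream, java.io.ByteArrayOutputStream,
    //           org.apache.hadoop.hbase.io.crypto.Encryption, org.apache.hadoop.hbase.util.Bytes
    byte[] plaintext = Bytes.toBytes("secret");
    ByteArrayOutputStream cipherOut = new ByteArrayOutputStream();
    Encryption.encrypt(cipherOut, plaintext, 0, plaintext.length, e);   // signature as documented above
    byte[] roundTrip = new byte[plaintext.length];
    Encryption.decrypt(roundTrip, 0, new ByteArrayInputStream(cipherOut.toByteArray()),
      roundTrip.length, d);   // the encryptor's and decryptor's state are finalized afterwards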
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryptor.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryptor.java
index f030de3e174..34f0fa4c0f7 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryptor.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryptor.java
@@ -28,7 +28,7 @@ import org.apache.yetus.audience.InterfaceAudience;
public interface Encryptor {
/**
- * Set the secret key n
+ * Set the secret key
*/
public void setKey(Key key);
@@ -50,12 +50,12 @@ public interface Encryptor {
public byte[] getIv();
/**
- * Set the initialization vector n
+ * Set the initialization vector
*/
public void setIv(byte[] iv);
/**
- * Create a stream for encryption n
+ * Create a stream for encryption
*/
public OutputStream createEncryptionStream(OutputStream out);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyProvider.java
index 6c6ec5dd759..0852bc7f13f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyProvider.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyProvider.java
@@ -31,13 +31,13 @@ public interface KeyProvider {
public static final String PASSWORDFILE = "passwordfile";
/**
- * Initialize the key provider n
+ * Initialize the key provider
*/
public void init(String params);
/**
- * Retrieve the key for a given key aliase n * @return the keys corresponding to the supplied
- * alias, or null if a key is not found
+ * Retrieve the key for a given key alias
+ * @return the key corresponding to the supplied alias, or null if a key is not found
*/
public Key getKey(String alias);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
index 7f13b2c6f66..52825b6c683 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
@@ -90,9 +90,8 @@ public interface DataBlockEncoder {
EncodedSeeker createSeeker(HFileBlockDecodingContext decodingCtx);
/**
- * Creates a encoder specific encoding context n * store configuration n * encoding strategy used
- * n * header bytes to be written, put a dummy header here if the header is unknown n * HFile meta
- * data
+ * Creates an encoder specific encoding context, given the store configuration, the encoding
+ * strategy used, the header bytes to be written (a dummy header if it is unknown) and the HFile meta data
* @return a newly created encoding context
*/
HFileBlockEncodingContext newDataBlockEncodingContext(Configuration conf,
@@ -100,7 +99,7 @@ public interface DataBlockEncoder {
/**
* Creates an encoder specific decoding context, which will prepare the data before actual
- * decoding n * store configuration n * HFile meta data
+ * decoding, given the store configuration and the HFile meta data
* @return a newly created decoding context
*/
HFileBlockDecodingContext newDataBlockDecodingContext(Configuration conf, HFileContext meta);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java
index 21f6c92ef35..4eba8fd854e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java
@@ -99,7 +99,7 @@ public enum DataBlockEncoding {
/**
* Writes id bytes to the given array starting from offset.
* @param dest output array
- * @param offset starting offset of the output array n
+ * @param offset starting offset of the output array
*/
// System.arraycopy is static native. Nothing we can do this until we have minimum JDK 9.
@SuppressWarnings("UnsafeFinalization")
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
index 3948aee35ae..68b300ae60f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
@@ -181,7 +181,7 @@ public class EncodedDataBlock {
* @param inputBuffer Array to be compressed.
* @param offset Offset to beginning of the data.
* @param length Length to be compressed.
- * @return Size of compressed data in bytes. n
+ * @return Size of compressed data in bytes.
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH_EXCEPTION",
justification = "No sure what findbugs wants but looks to me like no NPE")
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java
index 6835a8bac3c..63f173c38cc 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java
@@ -32,9 +32,9 @@ public interface HFileBlockDecodingContext {
/**
* Perform all actions that need to be done before the encoder's real decoding process.
* Decompression needs to be done if {@link HFileContext#getCompression()} returns a valid
- * compression algorithm. n * numBytes after block and encoding headers n * numBytes without
- * header required to store the block after decompressing (not decoding) n * ByteBuffer pointed
- * after the header but before the data n * on disk data to be decoded
+ * compression algorithm. Takes the number of bytes after the block and encoding headers, the
+ * number of bytes (without header) required to store the block after decompressing (not decoding),
+ * a ByteBuffer pointed after the header but before the data, and the on disk data to be decoded
*/
void prepareDecoding(int onDiskSizeWithoutHeader, int uncompressedSizeWithoutHeader,
ByteBuff blockBufferWithoutHeader, ByteBuff onDiskBlock) throws IOException;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/IndexBlockEncoding.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/IndexBlockEncoding.java
index ed97147ac9b..ad193cad613 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/IndexBlockEncoding.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/IndexBlockEncoding.java
@@ -91,7 +91,7 @@ public enum IndexBlockEncoding {
/**
* Writes id bytes to the given array starting from offset.
* @param dest output array
- * @param offset starting offset of the output array n
+ * @param offset starting offset of the output array
*/
public void writeIdInBytes(byte[] dest, int offset) throws IOException {
System.arraycopy(idInBytes, 0, dest, offset, ID_SIZE);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
index 5a61622101b..a2e63b9fda0 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
@@ -93,7 +93,8 @@ public class ThrottledInputStream extends InputStream {
/**
* Read bytes starting from the specified position. This requires rawStream is an instance of
- * {@link PositionedReadable}. nnnn * @return the number of bytes read
+ * {@link PositionedReadable}.
+ * @return the number of bytes read
*/
public int read(long position, byte[] buffer, int offset, int length) throws IOException {
if (!(rawStream instanceof PositionedReadable)) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java
index 157df98a9b0..b1ab8a9b28d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java
@@ -80,7 +80,7 @@ public interface Dictionary {
* @param data the data to be written in byte[]
* @param offset the offset
* @param length length to be written
- * @param dict the dictionary whose contents are to written n
+ * @param dict the dictionary whose contents are to written
*/
public static void write(OutputStream out, byte[] data, int offset, int length, Dictionary dict)
throws IOException {
@@ -103,7 +103,7 @@ public interface Dictionary {
* @param data the data to be written in ByteBuffer
* @param offset the offset
* @param length length to be written
- * @param dict the dictionary whose contents are to written n
+ * @param dict the dictionary whose contents are to written
*/
public static void write(OutputStream out, ByteBuffer data, int offset, int length,
Dictionary dict) throws IOException {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java
index 7cfa007478f..97e1e9d3345 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java
@@ -118,8 +118,8 @@ public class StreamUtils {
}
/**
- * Reads a varInt value stored in an array. n * Input array where the varInt is available n *
- * Offset in the input array where varInt is available
+ * Reads a varInt value stored in an array, given the input array where the varInt is available
+ * and the offset in that array where the varInt starts
* @return A pair of integers in which first value is the actual decoded varInt value and second
* value as number of bytes taken by this varInt for it's storage in the input array.
* @throws IOException When varint is malformed and not able to be read correctly
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java
index 27eca9479d6..9e77bfcd04b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java
@@ -85,7 +85,8 @@ public abstract class ByteBuff implements HBaseReferenceCounted {
public abstract int position();
/**
- * Sets this ByteBuff's position to the given value. n * @return this object
+ * Sets this ByteBuff's position to the given value.
+ * @return this object
*/
public abstract ByteBuff position(int position);
@@ -184,7 +185,7 @@ public abstract class ByteBuff implements HBaseReferenceCounted {
public abstract byte get();
/**
- * Fetches the byte at the given index. Does not change position of the underlying ByteBuffers n
+ * Fetches the byte at the given index. Does not change position of the underlying ByteBuffers
* @return the byte at the given index
*/
public abstract byte get(int index);
@@ -244,7 +245,8 @@ public abstract class ByteBuff implements HBaseReferenceCounted {
public abstract ByteBuff put(byte[] src, int offset, int length);
/**
- * Copies from the given byte[] to this ByteBuff n * @return this ByteBuff
+ * Copies from the given byte[] to this ByteBuff
+ * @return this ByteBuff
* @param src source byte array
* @return this ByteBuff
*/
@@ -269,14 +271,15 @@ public abstract class ByteBuff implements HBaseReferenceCounted {
* Fetches the short value at the given index. Does not change position of the underlying
* ByteBuffers. The caller is sure that the index will be after the current position of this
* ByteBuff. So even if the current short does not fit in the current item we can safely move to
- * the next item and fetch the remaining bytes forming the short n * @return the short value at
- * the given index
+ * the next item and fetch the remaining bytes forming the short
+ * @return the short value at the given index
*/
public abstract short getShort(int index);
/**
* Fetches the short value at the given offset from current position. Does not change position of
- * the underlying ByteBuffers. n * @return the short value at the given index.
+ * the underlying ByteBuffers.
+ * @return the short value at the given index.
*/
public abstract short getShortAfterPosition(int offset);
@@ -319,13 +322,15 @@ public abstract class ByteBuff implements HBaseReferenceCounted {
* Fetches the long at the given index. Does not change position of the underlying ByteBuffers.
* The caller is sure that the index will be after the current position of this ByteBuff. So even
* if the current long does not fit in the current item we can safely move to the next item and
- * fetch the remaining bytes forming the long n * @return the long value at the given index
+ * fetch the remaining bytes forming the long
+ * @return the long value at the given index
*/
public abstract long getLong(int index);
/**
* Fetches the long value at the given offset from current position. Does not change position of
- * the underlying ByteBuffers. n * @return the long value at the given index.
+ * the underlying ByteBuffers.
+ * @return the long value at the given index.
*/
public abstract long getLongAfterPosition(int offset);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
index c55ee021bd0..ddd567eb4b9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
@@ -149,8 +149,8 @@ public class MultiByteBuff extends ByteBuff {
}
/**
- * Fetches the byte at the given index. Does not change position of the underlying ByteBuffers n
- * * @return the byte at the given index
+ * Fetches the byte at the given index. Does not change position of the underlying ByteBuffers
+ * @return the byte at the given index
*/
@Override
public byte get(int index) {
@@ -201,8 +201,8 @@ public class MultiByteBuff extends ByteBuff {
}
/**
- * Fetches the int at the given index. Does not change position of the underlying ByteBuffers n
- * * @return the int value at the given index
+ * Fetches the int at the given index. Does not change position of the underlying ByteBuffers
+ * @return the int value at the given index
*/
@Override
public int getInt(int index) {
@@ -235,8 +235,8 @@ public class MultiByteBuff extends ByteBuff {
}
/**
- * Fetches the short at the given index. Does not change position of the underlying ByteBuffers n
- * * @return the short value at the given index
+ * Fetches the short at the given index. Does not change position of the underlying ByteBuffers
+ * @return the short value at the given index
*/
@Override
public short getShort(int index) {
@@ -347,8 +347,8 @@ public class MultiByteBuff extends ByteBuff {
}
/**
- * Fetches the long at the given index. Does not change position of the underlying ByteBuffers n
- * * @return the long value at the given index
+ * Fetches the long at the given index. Does not change position of the underlying ByteBuffers
+ * @return the long value at the given index
*/
@Override
public long getLong(int index) {
@@ -388,7 +388,8 @@ public class MultiByteBuff extends ByteBuff {
}
/**
- * Sets this MBB's position to the given value. n * @return this object
+ * Sets this MBB's position to the given value.
+ * @return this object
*/
@Override
public MultiByteBuff position(int position) {
@@ -569,7 +570,7 @@ public class MultiByteBuff extends ByteBuff {
/**
* Copies the content from this MBB's current position to the byte array and fills it. Also
- * advances the position of the MBB by the length of the byte[]. n
+ * advances the position of the MBB by the length of the byte[].
*/
@Override
public void get(byte[] dst) {
@@ -615,7 +616,8 @@ public class MultiByteBuff extends ByteBuff {
}
/**
- * Marks the limit of this MBB. n * @return This MBB
+ * Marks the limit of this MBB.
+ * @return This MBB
*/
@Override
public MultiByteBuff limit(int limit) {
@@ -686,8 +688,8 @@ public class MultiByteBuff extends ByteBuff {
}
/**
- * Writes a byte to this MBB at the current position and increments the position n * @return this
- * object
+ * Writes a byte to this MBB at the current position and increments the position
+ * @return this object
*/
@Override
public MultiByteBuff put(byte b) {
@@ -960,7 +962,7 @@ public class MultiByteBuff extends ByteBuff {
}
/**
- * Jumps the current position of this MBB by specified length. n
+ * Jumps the current position of this MBB by specified length.
*/
@Override
public MultiByteBuff skip(int length) {
@@ -982,7 +984,7 @@ public class MultiByteBuff extends ByteBuff {
}
/**
- * Jumps back the current position of this MBB by specified length. n
+ * Jumps back the current position of this MBB by specified length.
*/
@Override
public MultiByteBuff moveBack(int length) {
@@ -1109,8 +1111,8 @@ public class MultiByteBuff extends ByteBuff {
}
/**
- * Copy the content from this MBB to a byte[] based on the given offset and length n * the
- * position from where the copy should start n * the length upto which the copy has to be done
+ * Copy the content from this MBB to a byte[] based on the given offset (the position from where
+ * the copy should start) and length (the number of bytes to copy)
* @return byte[] with the copied contents from this MBB.
*/
@Override
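For orientation, a short sketch of the absolute-index accessors whose Javadoc is cleaned up above (illustrative only; it assumes the varargs MultiByteBuff(ByteBuffer...) constructor and the relative putInt(int) writer):

    // Requires: java.nio.ByteBuffer, org.apache.hadoop.hbase.nio.MultiByteBuff
    MultiByteBuff mbb = new MultiByteBuff(ByteBuffer.allocate(4), ByteBuffer.allocate(4));
    mbb.putInt(42);              // fills the first fragment
    mbb.putInt(7);               // fills the second fragment
    int first = mbb.getInt(0);   // absolute read, does not move the position
    mbb.position(0);             // rewind before relative reads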
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java
index 9ef9e2ddc17..e2cac4b6b56 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java
@@ -179,7 +179,7 @@ public abstract class User {
/**
* Wraps an underlying {@code UserGroupInformation} instance.
- * @param ugi The base Hadoop user n
+ * @param ugi The base Hadoop user
*/
public static User create(UserGroupInformation ugi) {
if (ugi == null) {
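A small sketch of how the factory documented above is typically used (illustrative only; UserGroupInformation.getCurrentUser() comes from Hadoop and may throw IOException):

    // Requires: org.apache.hadoop.hbase.security.User, org.apache.hadoop.security.UserGroupInformation
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    User user = User.create(ugi);   // wraps the underlying Hadoop user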
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java
index fcf6cc64896..436b5bbc69a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java
@@ -181,7 +181,7 @@ public class UserProvider extends BaseConfigurable {
/**
* Wraps an underlying {@code UserGroupInformation} instance.
- * @param ugi The base Hadoop user n
+ * @param ugi The base Hadoop user
*/
public User create(UserGroupInformation ugi) {
if (ugi == null) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
index 179074ef00c..88ee9c9666a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
@@ -69,7 +69,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange
/**
* Update the beginning of this range. {@code offset + length} may not be greater than
- * {@code bytes.length}. Resets {@code position} to 0. n * the new start of this range.
+ * {@code bytes.length}. Resets {@code position} to 0. The argument is the new start of this range.
* @return this.
*/
@Override
@@ -82,7 +82,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange
/**
* Update the length of this range. {@code offset + length} should not be greater than
* {@code bytes.length}. If {@code position} is greater than the new {@code length}, sets
- * {@code position} to {@code length}. n * The new length of this range.
+ * {@code position} to {@code length}. The argument is the new length of this range.
* @return this.
*/
@Override
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java
index 32c6779bc04..be1868b70d7 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java
@@ -865,7 +865,7 @@ public final class ByteBufferUtils {
}
/**
- * n * ByteBuffer to hash n * offset to start from n * length to hash
+ * Hashes a ByteBuffer region, given the buffer to hash, the offset to start from and the length to hash
*/
public static int hashCode(ByteBuffer buf, int offset, int length) {
int hash = 1;
@@ -980,7 +980,7 @@ public final class ByteBufferUtils {
* @param buf The ByteBuffer
* @param offset Offset to int value
* @param length Number of bytes used to store the int value.
- * @return the int value n * if there's not enough bytes left in the buffer after the given offset
+ * @return the int value; an exception is raised if there's not enough bytes left in the buffer after the given offset
*/
public static int readAsInt(ByteBuffer buf, int offset, final int length) {
if (offset + length > buf.limit()) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java
index 64bd5cb3b6c..4addf9057e2 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java
@@ -63,14 +63,13 @@ public interface ByteRange extends Comparable {
/**
* Nullifies this ByteRange. That is, it becomes a husk, being a range over no byte[] whatsoever.
- * n
*/
public ByteRange unset();
/**
* Reuse this {@code ByteRange} over a new byte[]. {@code offset} is set to 0 and {@code length}
* is set to {@code capacity}.
- * @param capacity the size of a new byte[]. n
+ * @param capacity the size of a new byte[].
*/
public ByteRange set(int capacity);
@@ -78,7 +77,7 @@ public interface ByteRange extends Comparable {
* Reuse this {@code ByteRange} over a new byte[]. {@code offset} is set to 0 and {@code length}
* is set to {@code bytes.length}. A null {@code bytes} IS supported, in which case this method
* will behave equivalently to {@link #unset()}.
- * @param bytes the array to wrap. n
+ * @param bytes the array to wrap.
*/
public ByteRange set(byte[] bytes);
@@ -188,21 +187,21 @@ public interface ByteRange extends Comparable {
/**
* Store the short value at {@code index}
* @param index the index in the range where {@code val} is stored
- * @param val the value to store n
+ * @param val the value to store
*/
public ByteRange putShort(int index, short val);
/**
* Store the int value at {@code index}
* @param index the index in the range where {@code val} is stored
- * @param val the value to store n
+ * @param val the value to store
*/
public ByteRange putInt(int index, int val);
/**
* Store the long value at {@code index}
* @param index the index in the range where {@code val} is stored
- * @param val the value to store n
+ * @param val the value to store
*/
public ByteRange putLong(int index, long val);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
index d6662506040..0203cc390fe 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
@@ -231,8 +231,9 @@ public class Bytes implements Comparable {
}
/**
- * Compares the bytes in this object to the specified byte array n * @return Positive if left is
- * bigger than right, 0 if they are equal, and negative if left is smaller than right.
+ * Compares the bytes in this object to the specified byte array
+ * @return Positive if left is bigger than right, 0 if they are equal, and negative if left is
+ * smaller than right.
*/
public int compareTo(final byte[] that) {
return BYTES_RAWCOMPARATOR.compare(this.bytes, this.offset, this.length, that, 0, that.length);
@@ -534,7 +535,8 @@ public class Bytes implements Comparable {
/**
* Write a printable representation of a byte array.
- * @param b byte array n * @see #toStringBinary(byte[], int, int)
+ * @param b byte array
+ * @see #toStringBinary(byte[], int, int)
*/
public static String toStringBinary(final byte[] b) {
if (b == null) return "null";
@@ -2055,7 +2057,7 @@ public class Bytes implements Comparable {
* Copy the byte array given in parameter and return an instance of a new byte array with the same
* length and the same content.
* @param bytes the byte array to copy from
- * @return a copy of the given designated byte array nn
+ * @return a copy of the given designated byte array
*/
public static byte[] copy(byte[] bytes, final int offset, final int length) {
if (bytes == null) return null;
@@ -2236,7 +2238,7 @@ public class Bytes implements Comparable {
}
/**
- * Fill given array with zeros at the specified position. nnn
+ * Fill given array with zeros at the specified position.
*/
public static void zero(byte[] b, int offset, int length) {
checkPositionIndex(offset, b.length, "offset");
@@ -2319,7 +2321,8 @@ public class Bytes implements Comparable {
}
/**
- * Create a byte array which is multiple given bytes nn * @return byte array
+ * Create a byte array which is the given byte array repeated multiple times
+ * @return byte array
*/
public static byte[] multiple(byte[] srcBytes, int multiNum) {
if (multiNum <= 0) {
@@ -2374,7 +2377,7 @@ public class Bytes implements Comparable {
/**
* Create a byte array from a string of hash digits. The length of the string must be a multiple
- * of 2 n
+ * of 2
*/
public static byte[] fromHex(String hex) {
checkArgument(hex.length() % 2 == 0, "length must be a multiple of 2");
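The Bytes helpers touched above compose naturally; a quick sketch (illustrative only, using just the signatures shown in this patch):

    // Requires: org.apache.hadoop.hbase.util.Bytes
    byte[] raw = Bytes.fromHex("cafebabe");            // the hex string length must be a multiple of 2
    byte[] slice = Bytes.copy(raw, 1, 2);              // independent copy of two bytes
    String printable = Bytes.toStringBinary(slice);    // non-printable bytes are rendered as \xNN escapes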
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java
index d943803fb2f..dc810834a66 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java
@@ -85,8 +85,8 @@ public enum ChecksumType {
}
/**
- * Cannot rely on enum ordinals . They change if item is removed or moved. Do our own codes. n
- * * @return Type associated with passed code.
+ * Cannot rely on enum ordinals. They change if an item is removed or moved. Do our own codes.
+ * @return Type associated with passed code.
*/
public static ChecksumType codeToType(final byte b) {
for (ChecksumType t : ChecksumType.values()) {
@@ -98,8 +98,8 @@ public enum ChecksumType {
}
/**
- * Map a checksum name to a specific type. Do our own names. n * @return Type associated with
- * passed code.
+ * Map a checksum name to a specific type. Do our own names.
+ * @return Type associated with passed code.
*/
public static ChecksumType nameToType(final String name) {
for (ChecksumType t : ChecksumType.values()) {
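A short sketch of the two lookups documented above (illustrative only; it assumes CRC32C is one of the defined checksum names and that getCode() exposes the stored code):

    // Requires: org.apache.hadoop.hbase.util.ChecksumType
    ChecksumType byName = ChecksumType.nameToType("CRC32C");
    ChecksumType byCode = ChecksumType.codeToType(byName.getCode());   // round-trips to the same constant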
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java
index 84e70873727..1b3eef180a5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java
@@ -27,10 +27,9 @@ public class Classes {
/**
* Equivalent of {@link Class#forName(String)} which also returns classes for primitives like
- * boolean , etc. n * The name of the class to retrieve. Can be either a normal class
- * or a primitive class.
- * @return The class specified by className n * If the requested class can not be
- * found.
+ * boolean, etc. Takes the name of the class to retrieve, which can be either a normal class
+ * or a primitive class.
+ * @return The class specified by className. Throws ClassNotFoundException if the requested class can not be found.
*/
public static Class<?> extendedForName(String className) throws ClassNotFoundException {
Class<?> valueType;
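Illustrative use of the method documented above (not part of this patch):

    // Requires: org.apache.hadoop.hbase.util.Classes
    Class<?> primitive = Classes.extendedForName("int");            // resolves primitives, unlike Class.forName
    Class<?> normal = Classes.extendedForName("java.lang.String");  // still handles regular class names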
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
index ca8d27d8eeb..80076495456 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
@@ -328,7 +328,7 @@ public final class CommonFSUtils {
* Returns the URI in the string format
* @param c configuration
* @param p path
- * @return - the URI's to string format n
+ * @return the URI in string format
*/
public static String getDirUri(final Configuration c, Path p) throws IOException {
if (p.toUri().getScheme() != null) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java
index 531d12085fe..a5e6a65efc9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java
@@ -215,7 +215,7 @@ public class CoprocessorClassLoader extends ClassLoaderBase {
* @param parent the parent class loader for exempted classes
* @param pathPrefix a prefix used in temp path name to store the jar file locally
* @param conf the configuration used to create the class loader, if needed
- * @return a CoprocessorClassLoader for the coprocessor jar path n
+ * @return a CoprocessorClassLoader for the coprocessor jar path
*/
public static CoprocessorClassLoader getClassLoader(final Path path, final ClassLoader parent,
final String pathPrefix, final Configuration conf) throws IOException {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java
index 276e436ed13..0cd1b41c502 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java
@@ -58,7 +58,7 @@ public class KeyLocker {
}, NB_CONCURRENT_LOCKS);
/**
- * Return a lock for the given key. The lock is already locked. n
+ * Return a lock for the given key. The lock is already locked.
*/
public ReentrantLock acquireLock(K key) {
if (key == null) throw new IllegalArgumentException("key must not be null");
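A usage sketch for the lock documented above (illustrative only; the lock comes back already held, so the caller only has to unlock it):

    // Requires: java.util.concurrent.locks.ReentrantLock, org.apache.hadoop.hbase.util.KeyLocker
    KeyLocker<String> locker = new KeyLocker<>();
    ReentrantLock lock = locker.acquireLock("row-42");
    try {
      // critical section guarded per key
    } finally {
      lock.unlock();
    }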
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java
index d967f5d53a7..7e143e15de2 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java
@@ -29,8 +29,8 @@ import org.apache.yetus.audience.InterfaceAudience;
public class MD5Hash {
/**
- * Given a byte array, returns in MD5 hash as a hex string. n * @return SHA1 hash as a 32
- * character hex string.
+ * Given a byte array, returns its MD5 hash as a hex string.
+ * @return MD5 hash as a 32 character hex string.
*/
public static String getMD5AsHex(byte[] key) {
return getMD5AsHex(key, 0, key.length);
@@ -39,8 +39,8 @@ public class MD5Hash {
/**
* Given a byte array, returns its MD5 hash as a hex string. Only "length" number of bytes
* starting at "offset" within the byte array are used.
- * @param key the key to hash (variable length byte array) nn * @return MD5 hash as a 32 character
- * hex string.
+ * @param key the key to hash (variable length byte array)
+ * @return MD5 hash as a 32 character hex string.
*/
public static String getMD5AsHex(byte[] key, int offset, int length) {
try {
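Both overloads documented above in one illustrative sketch (not part of this patch):

    // Requires: org.apache.hadoop.hbase.util.Bytes, org.apache.hadoop.hbase.util.MD5Hash
    byte[] key = Bytes.toBytes("region-start-key");
    String digest = MD5Hash.getMD5AsHex(key);          // 32 hex characters
    String partial = MD5Hash.getMD5AsHex(key, 0, 6);   // hash only the first 6 bytes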
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java
index dd8eb4f1858..fe8d111dfbe 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java
@@ -74,14 +74,14 @@ public class Pair implements Serializable {
}
/**
- * Return the first element stored in the pair. n
+ * Return the first element stored in the pair.
*/
public T1 getFirst() {
return first;
}
/**
- * Return the second element stored in the pair. n
+ * Return the second element stored in the pair.
*/
public T2 getSecond() {
return second;
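Trivial usage sketch for the accessors documented above (illustrative only):

    // Requires: org.apache.hadoop.hbase.util.Pair
    Pair<String, Long> sizeByTable = new Pair<>("t1", 1024L);
    String table = sizeByTable.getFirst();
    long size = sizeByTable.getSecond();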
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java
index 44bc2b81dc0..ef44fc4e043 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java
@@ -42,14 +42,14 @@ public class PairOfSameType implements Iterable {
}
/**
- * Return the first element stored in the pair. n
+ * Return the first element stored in the pair.
*/
public T getFirst() {
return first;
}
/**
- * Return the second element stored in the pair. n
+ * Return the second element stored in the pair.
*/
public T getSecond() {
return second;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java
index efa52612be6..cb61cfbe246 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java
@@ -156,12 +156,12 @@ public interface PositionedByteRange extends ByteRange {
public PositionedByteRange put(byte[] val, int offset, int length);
/**
- * Limits the byte range upto a specified value. Limit cannot be greater than capacity nn
+ * Limits the byte range up to a specified value. Limit cannot be greater than capacity
*/
public PositionedByteRange setLimit(int limit);
/**
- * Return the current limit n
+ * Return the current limit
*/
public int getLimit();
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
index c3d4d82f6bd..f73064f70a8 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
@@ -77,8 +77,8 @@ public final class PrettyPrinter {
/**
* Convert a human readable string to its value.
- * @see org.apache.hadoop.hbase.util.PrettyPrinter#format(String, Unit) nn * @return the value
- * corresponding to the human readable string
+ * @see org.apache.hadoop.hbase.util.PrettyPrinter#format(String, Unit)
+ * @return the value corresponding to the human readable string
*/
public static String valueOf(final String pretty, final Unit unit) throws HBaseException {
StringBuilder value = new StringBuilder();
@@ -155,7 +155,8 @@ public final class PrettyPrinter {
* Convert a human readable time interval to seconds. Examples of the human readable time
* intervals are: 50 DAYS 1 HOUR 30 MINUTES , 25000 SECONDS etc. The units of time specified can
* be in uppercase as well as lowercase. Also, if a single number is specified without any time
- * unit, it is assumed to be in seconds. n * @return value in seconds
+ * unit, it is assumed to be in seconds.
+ * @return value in seconds
*/
private static long humanReadableIntervalToSec(final String humanReadableInterval)
throws HBaseException {
@@ -261,7 +262,7 @@ public final class PrettyPrinter {
* KB , 25000 B etc. The units of size specified can be in uppercase as well as lowercase. Also,
* if a single number is specified without any time unit, it is assumed to be in bytes.
* @param humanReadableSize human readable size
- * @return value in bytes n
+ * @return value in bytes
*/
private static long humanReadableSizeToBytes(final String humanReadableSize)
throws HBaseException {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleMutableByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleMutableByteRange.java
index 24b9f2d997b..868c731e0a8 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleMutableByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleMutableByteRange.java
@@ -66,22 +66,22 @@ public class SimpleMutableByteRange extends AbstractByteRange {
/**
* Create a new {@code ByteRange} over a new backing array of size {@code capacity}. The range's
- * offset and length are 0 and {@code capacity}, respectively. n * the size of the backing array.
+ * offset and length are 0 and {@code capacity}, respectively. The argument is the size of the backing array.
*/
public SimpleMutableByteRange(int capacity) {
this(new byte[capacity]);
}
/**
- * Create a new {@code ByteRange} over the provided {@code bytes}. n * The array to wrap.
+ * Create a new {@code ByteRange} over the provided {@code bytes}, the array to wrap.
*/
public SimpleMutableByteRange(byte[] bytes) {
set(bytes);
}
/**
- * Create a new {@code ByteRange} over the provided {@code bytes}. n * The array to wrap. n * The
- * offset into {@code bytes} considered the beginning of this range. n * The length of this range.
+ * Create a new {@code ByteRange} over the provided {@code bytes}: the array to wrap, the offset
+ * into {@code bytes} considered the beginning of this range, and the length of this range.
*/
public SimpleMutableByteRange(byte[] bytes, int offset, int length) {
set(bytes, offset, length);
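A short sketch combining the constructors above with the absolute put methods from ByteRange (illustrative only):

    // Requires: org.apache.hadoop.hbase.util.ByteRange, org.apache.hadoop.hbase.util.SimpleMutableByteRange
    ByteRange range = new SimpleMutableByteRange(8);   // fresh 8-byte backing array, offset 0, length 8
    range.putInt(0, 42);                               // absolute write at index 0
    range.putShort(4, (short) 7);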
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java
index d91fd712f37..68e99c3053b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java
@@ -70,7 +70,7 @@ public class SimplePositionedMutableByteRange extends AbstractPositionedByteRang
/**
* Create a new {@code PositionedByteRange} over a new backing array of size {@code capacity}. The
- * range's offset and length are 0 and {@code capacity}, respectively. n * the size of the backing
+ * range's offset and length are 0 and {@code capacity}, respectively. The argument is the size of
+ * the backing array.
*/
public SimplePositionedMutableByteRange(int capacity) {
@@ -78,17 +78,15 @@ public class SimplePositionedMutableByteRange extends AbstractPositionedByteRang
}
/**
- * Create a new {@code PositionedByteRange} over the provided {@code bytes}. n * The array to
- * wrap.
+ * Create a new {@code PositionedByteRange} over the provided {@code bytes}, the array to wrap.
*/
public SimplePositionedMutableByteRange(byte[] bytes) {
set(bytes);
}
/**
- * Create a new {@code PositionedByteRange} over the provided {@code bytes}. n * The array to
- * wrap. n * The offset into {@code bytes} considered the beginning of this range. n * The length
- * of this range.
+ * Create a new {@code PositionedByteRange} over the provided {@code bytes}: the array to wrap,
+ * the offset into {@code bytes} considered the beginning of this range, and the length of this range.
*/
public SimplePositionedMutableByteRange(byte[] bytes, int offset, int length) {
set(bytes, offset, length);
@@ -130,7 +128,7 @@ public class SimplePositionedMutableByteRange extends AbstractPositionedByteRang
/**
* Update the beginning of this range. {@code offset + length} may not be greater than
+ * {@code bytes.length}. Resets {@code position} to 0. The argument is the new start of this range.
+ * {@code bytes.length}. Resets {@code position} to 0. the new start of this range.
* @return this.
*/
@Override
@@ -143,7 +141,7 @@ public class SimplePositionedMutableByteRange extends AbstractPositionedByteRang
/**
* Update the length of this range. {@code offset + length} should not be greater than
* {@code bytes.length}. If {@code position} is greater than the new {@code length}, sets
- * {@code position} to {@code length}. n * The new length of this range.
+ * {@code position} to {@code length}. The argument is the new length of this range.
* @return this.
*/
@Override
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/TimeMeasurable.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/TimeMeasurable.java
index 0caecf649ce..e23c62045fa 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/TimeMeasurable.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/TimeMeasurable.java
@@ -27,7 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience;
public interface TimeMeasurable {
/**
- * Measure elapsed time. n
+ * Measure elapsed time.
*/
T measure();
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAccess.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAccess.java
index 48b60a49616..3aa8a6ec123 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAccess.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAccess.java
@@ -193,7 +193,7 @@ public final class UnsafeAccess {
/**
* Reads a int value at the given Object's offset considering it was written in big-endian format.
- * nn * @return int value at offset
+ * @return int value at offset
*/
public static int toInt(Object ref, long offset) {
if (LITTLE_ENDIAN) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/WindowMovingAverage.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/WindowMovingAverage.java
index 154bc0e42db..2c600e3c5fd 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/WindowMovingAverage.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/WindowMovingAverage.java
@@ -73,7 +73,7 @@ public class WindowMovingAverage extends MovingAverage {
/**
* Get statistics at index.
- * @param index index of bar n
+ * @param index index of bar
*/
protected long getStatisticsAtIndex(int index) {
if (index < 0 || index >= getNumberOfStatistics()) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java
index de0cbdfa918..32cfde410d5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java
@@ -106,8 +106,8 @@ public final class ZKConfig {
}
/**
- * Return the ZK Quorum servers string given the specified configuration n * @return Quorum
- * servers String
+ * Return the ZK Quorum servers string given the specified configuration
+ * @return Quorum servers String
*/
private static String getZKQuorumServersStringFromHbaseConfig(Configuration conf) {
String defaultClientPort = Integer.toString(
@@ -191,8 +191,8 @@ public final class ZKConfig {
/**
* Separate the given key into the three configurations it should contain: hbase.zookeeper.quorum,
- * hbase.zookeeper.client.port and zookeeper.znode.parent n * @return the three configuration in
- * the described order n
+ * hbase.zookeeper.client.port and zookeeper.znode.parent
+ * @return the three configurations in the described order
*/
public static ZKClusterKey transformClusterKey(String key) throws IOException {
List parts = Splitter.on(':').splitToList(key);
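A small sketch of the cluster-key parsing documented above (illustrative only; it assumes ZKClusterKey is the nested holder type named in the signature):

    // Requires: org.apache.hadoop.hbase.zookeeper.ZKConfig (throws IOException on a malformed key)
    ZKConfig.ZKClusterKey clusterKey = ZKConfig.transformClusterKey("zk1,zk2,zk3:2181:/hbase");
    // clusterKey now carries the quorum, the client port and the znode parent split out of the key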
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
index 6bb93c09c09..e959f77a722 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
@@ -258,7 +258,7 @@ public class TestHBaseConfiguration {
}
/**
- * Wrapper to fetch the configured {@code List}s. n * Configuration with
+ * Wrapper to fetch the configured {@code List}s, given a Configuration with
* GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS defined
* @return List of CredentialProviders, or null if they could not be loaded
*/
@@ -283,8 +283,8 @@ public class TestHBaseConfiguration {
/**
* Create a CredentialEntry using the configured Providers. If multiple CredentialProviders are
- * configured, the first will be used. n * Configuration for the CredentialProvider n *
- * CredentialEntry name (alias) n * The credential
+ * configured, the first will be used. Takes the Configuration for the CredentialProvider, the
+ * CredentialEntry name (alias) and the credential
*/
public void createEntry(Configuration conf, String name, char[] credential) throws Exception {
if (!isHadoopCredentialProviderAvailable()) {
@@ -303,8 +303,8 @@ public class TestHBaseConfiguration {
/**
* Create a CredentialEntry with the give name and credential in the credentialProvider. The
- * credentialProvider argument must be an instance of Hadoop CredentialProvider. n * Instance of
- * CredentialProvider n * CredentialEntry name (alias) n * The credential to store
+ * credentialProvider argument must be an instance of Hadoop CredentialProvider. Takes the
+ * CredentialProvider instance, the CredentialEntry name (alias) and the credential to store
*/
private void createEntryInProvider(Object credentialProvider, String name, char[] credential)
throws Exception {
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestContext.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestContext.java
index 583e7efcfa9..0185ebff0ec 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestContext.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestContext.java
@@ -409,7 +409,7 @@ public final class X509TestContext {
* circumstances to inject a "bad" certificate where the keystore doesn't match the CA in the
* truststore. Or use it to create a connection without a truststore.
* @see #setConfigurations(KeyStoreFileType, KeyStoreFileType) which sets both keystore and
- * truststore and is more applicable to general use. nnn
+ * truststore and is more applicable to general use.
*/
public void setKeystoreConfigurations(KeyStoreFileType keyStoreFileType, Configuration confToSet)
throws IOException {
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestHelpers.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestHelpers.java
index 56d3c8cb859..78d70f8f581 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestHelpers.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestHelpers.java
@@ -371,7 +371,7 @@ final class X509TestHelpers {
* @param cert the certificate to serialize.
* @param keyPassword an optional password to encrypt the trust store. If empty or null, the cert
* will not be encrypted.
- * @return the serialized bytes of the BCFKS trust store. nn
+ * @return the serialized bytes of the BCFKS trust store.
*/
public static byte[] certToBCFKSTrustStoreBytes(X509Certificate cert, char[] keyPassword)
throws IOException, GeneralSecurityException {
@@ -434,7 +434,7 @@ final class X509TestHelpers {
* @param privateKey the private key to serialize.
* @param keyPassword an optional key password. If empty or null, the private key will not be
* encrypted.
- * @return the serialized bytes of the BCFKS key store. nn
+ * @return the serialized bytes of the BCFKS key store.
*/
public static byte[] certAndPrivateKeyToBCFKSBytes(X509Certificate cert, PrivateKey privateKey,
char[] keyPassword) throws IOException, GeneralSecurityException {
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RandomDistribution.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RandomDistribution.java
index b07a924a4e3..6635accedbb 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RandomDistribution.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/RandomDistribution.java
@@ -51,7 +51,7 @@ public class RandomDistribution {
/**
* Generate random integers from min (inclusive) to max (exclusive) following even distribution.
- * n * The basic random number generator. n * Minimum integer n * maximum integer (exclusive).
+ * Takes the basic random number generator, the minimum integer (inclusive) and the maximum integer (exclusive).
*/
public Flat(Random random, int min, int max) {
if (min >= max) {
@@ -82,17 +82,16 @@ public class RandomDistribution {
private final ArrayList v;
/**
- * Constructor n * The random number generator. n * minimum integer (inclusvie) n * maximum
- * integer (exclusive) n * parameter sigma. (sigma > 1.0)
+ * Constructor. Takes the random number generator, the minimum integer (inclusive), the maximum
+ * integer (exclusive) and the parameter sigma (sigma > 1.0).
*/
public Zipf(Random r, int min, int max, double sigma) {
this(r, min, max, sigma, DEFAULT_EPSILON);
}
/**
- * Constructor. n * The random number generator. n * minimum integer (inclusvie) n * maximum
- * integer (exclusive) n * parameter sigma. (sigma > 1.0) n * Allowable error percentage (0 <
- * epsilon < 1.0).
+ * Constructor. Takes the random number generator, the minimum integer (inclusive), the maximum
+ * integer (exclusive), the parameter sigma (sigma > 1.0) and the allowable error percentage (0 < epsilon < 1.0).
*/
public Zipf(Random r, int min, int max, double sigma, double epsilon) {
if ((max <= min) || (sigma <= 1) || (epsilon <= 0) || (epsilon >= 0.5)) {
@@ -178,8 +177,8 @@ public class RandomDistribution {
/**
* Generate random integers from min (inclusive) to max (exclusive) following Binomial
- * distribution. n * The basic random number generator. n * Minimum integer n * maximum integer
- * (exclusive). n * parameter.
+ * distribution. Takes the basic random number generator, the minimum integer (inclusive), the
+ * maximum integer (exclusive) and the parameter p.
*/
public Binomial(Random random, int min, int max, double p) {
if (min >= max) {
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java
index f2d3f63dbcb..fd3cfcc8b87 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java
@@ -68,7 +68,7 @@ public class MetricSampleQuantiles {
/**
* Specifies the allowable error for this rank, depending on which quantiles are being targeted.
* This is the f(r_i, n) function from the CKMS paper. It's basically how wide the range of this
- * rank can be. n * the index in the list of samples
+ * rank can be. The argument is the index in the list of samples.
*/
private double allowableError(int rank) {
int size = samples.size();
@@ -208,7 +208,7 @@ public class MetricSampleQuantiles {
/**
* Get a snapshot of the current values of all the tracked quantiles.
- * @return snapshot of the tracked quantiles n * if no items have been added to the estimator
+ * @return snapshot of the tracked quantiles; an IOException is raised if no items have been added to the estimator
*/
synchronized public Map snapshot() throws IOException {
// flush the buffer first for best results
diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
index 50cefc4c39a..ce1b387bc15 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
@@ -252,10 +252,10 @@ public class HttpServer implements FilterContainer {
private int port = -1;
/**
- * Add an endpoint that the HTTP server should listen to. n * the endpoint of that the HTTP
- * server should listen to. The scheme specifies the protocol (i.e. HTTP / HTTPS), the host
- * specifies the binding address, and the port specifies the listening port. Unspecified or zero
- * port means that the server can listen to any port.
+ * Add an endpoint that the HTTP server should listen to. The scheme of the endpoint specifies
+ * the protocol (i.e. HTTP / HTTPS), the host specifies the binding address, and the port
+ * specifies the listening port. An unspecified or zero port means that the server can listen
+ * to any port.
*/
public Builder addEndpoint(URI endpoint) {
endpoints.add(endpoint);
diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
index c8456a461bb..494a30c3e77 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
@@ -141,7 +141,7 @@ public class ProxyUserAuthenticationFilter extends AuthenticationFilter {
/**
* The purpose of this function is to get the doAs parameter of a http request case insensitively
- * n * @return doAs parameter if exists or null otherwise
+ * @return doAs parameter if exists or null otherwise
*/
public static String getDoasFromHeader(final HttpServletRequest request) {
String doas = null;
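A minimal sketch of the case-insensitive lookup this javadoc describes (an illustration, not the filter's actual code):

```java
static String findDoas(HttpServletRequest request) {
  Enumeration<String> names = request.getHeaderNames();
  while (names.hasMoreElements()) {
    String name = names.nextElement();
    if ("doAs".equalsIgnoreCase(name)) {
      return request.getHeader(name); // first case-insensitive match wins
    }
  }
  return null; // no doAs parameter present
}
```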
diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
index d393187b1e4..978de8530ef 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
@@ -146,8 +146,8 @@ public class JMXJsonServlet extends HttpServlet {
}
/**
- * Process a GET request for the specified resource. n * The servlet request we are processing n *
- * The servlet response we are creating
+ * Process a GET request for the specified resource, given the servlet request we are processing
+ * and the servlet response we are creating.
*/
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException {
diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java
index 64119ec5095..cc6a99bd300 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java
@@ -112,7 +112,7 @@ public final class JSONMetricUtil {
* Method for building map used for constructing ObjectName. Mapping is done with arrays indices
* @param keys Map keys
* @param values Map values
- * @return Map or null if arrays are empty * or have different number of elements
+ * @return Map or null if arrays are empty or have different number of elements
*/
@SuppressWarnings("JdkObsolete") // javax requires hashtable param for ObjectName constructor
public static Hashtable buldKeyValueTable(String[] keys, String[] values) {
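A sketch of the parallel-array mapping the javadoc describes: null for empty or mismatched arrays, otherwise keys[i] maps to values[i] (illustrative, not the utility's source):

```java
static Hashtable<String, String> toKeyValueTable(String[] keys, String[] values) {
  if (keys.length == 0 || values.length == 0 || keys.length != values.length) {
    return null; // empty input or different number of elements
  }
  Hashtable<String, String> table = new Hashtable<>();
  for (int i = 0; i < keys.length; i++) {
    table.put(keys[i], values[i]);
  }
  return table;
}
```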
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
index ae7fef86500..314a70acf12 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
@@ -78,7 +78,7 @@ public class DistributedHBaseCluster extends HBaseClusterInterface {
}
/**
- * Returns a ClusterStatus for this HBase cluster n
+ * Returns a ClusterStatus for this HBase cluster
*/
@Override
public ClusterMetrics getClusterMetrics() throws IOException {
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
index 7223a1f753e..4da9244a6cd 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
@@ -663,7 +663,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
}
/**
- * After adding data to the table start a mr job to nnn
+ * After adding data to the table start a mr job to
*/
private void runCheck() throws IOException, ClassNotFoundException, InterruptedException {
LOG.info("Running check");
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index 068eb574659..0e259f5072a 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -1159,8 +1159,8 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
}
/**
- * nn * @return Return new byte array that has ordinal as prefix on front taking
- * up Bytes.SIZEOF_SHORT bytes followed by r
+ * Returns new byte array that has ordinal as prefix on front taking up
+ * Bytes.SIZEOF_SHORT bytes followed by r
*/
public static byte[] addPrefixFlag(final int ordinal, final byte[] r) {
byte[] prefix = Bytes.toBytes((short) ordinal);
@@ -1174,7 +1174,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
}
/**
- * n * @return Type from the Counts enum of this row. Reads prefix added by
+ * Returns type from the Counts enum of this row. Reads prefix added by
* {@link #addPrefixFlag(int, byte[])}
*/
public static VerifyCounts whichType(final byte[] bs) {
@@ -1182,9 +1182,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
return VerifyCounts.values()[ordinal];
}
- /**
- * n * @return Row bytes minus the type flag.
- */
+ /** Returns Row bytes minus the type flag. */
public static byte[] getRowOnly(BytesWritable bw) {
byte[] bytes = new byte[bw.getLength() - Bytes.SIZEOF_SHORT];
System.arraycopy(bw.getBytes(), Bytes.SIZEOF_SHORT, bytes, 0, bytes.length);
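Taken together, addPrefixFlag, whichType and getRowOnly implement a two-byte ordinal prefix on each row; a condensed sketch of the scheme (not the test's exact code):

```java
static byte[] addPrefix(int ordinal, byte[] row) {
  byte[] prefix = Bytes.toBytes((short) ordinal); // Bytes.SIZEOF_SHORT == 2
  byte[] result = new byte[prefix.length + row.length];
  System.arraycopy(prefix, 0, result, 0, prefix.length);
  System.arraycopy(row, 0, result, prefix.length, row.length);
  return result;
}

static int readPrefix(byte[] flaggedRow) {
  return Bytes.toShort(flaggedRow, 0, Bytes.SIZEOF_SHORT); // ordinal of the Counts enum
}

static byte[] stripPrefix(byte[] flaggedRow) {
  byte[] row = new byte[flaggedRow.length - Bytes.SIZEOF_SHORT];
  System.arraycopy(flaggedRow, Bytes.SIZEOF_SHORT, row, 0, row.length);
  return row;
}
```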
@@ -1273,7 +1271,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
/**
* Dump out extra info around references if there are any. Helps debugging.
- * @return StringBuilder filled with references if any. n
+ * @return StringBuilder filled with references if any.
*/
@SuppressWarnings("JavaUtilDate")
private StringBuilder dumpExtraInfoOnRefs(final BytesWritable key, final Context context,
@@ -1425,8 +1423,8 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
}
/**
- * Verify the values in the Counters against the expected number of entries written. n *
- * Expected number of referenced entrires n * The Job's Counters object
+ * Verify the values in the Counters against the expected number of referenced entries written,
+ * given the Job's Counters object.
* @return True if the values match what's expected, false otherwise
*/
protected boolean verifyExpectedValues(long expectedReferenced, Counters counters) {
@@ -1454,7 +1452,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
/**
* Verify that the Counters don't contain values which indicate an outright failure from the
- * Reducers. n * The Job's counters
+ * Reducers, given the Job's counters.
* @return True if the "bad" counter objects are 0, false otherwise
*/
protected boolean verifyUnexpectedValues(Counters counters) {
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java
index 7bf4b4a95af..451e2d760f3 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java
@@ -185,7 +185,7 @@ public class IntegrationTestReplication extends IntegrationTestBigLinkedList {
/**
* This tears down any tables that existed from before and rebuilds the tables and schemas on
* the source cluster. It then sets up replication from the source to the sink cluster by using
- * the {@link org.apache.hadoop.hbase.client.Admin} connection. n
+ * the {@link org.apache.hadoop.hbase.client.Admin} connection.
*/
protected void setupTablesAndReplication() throws Exception {
TableName tableName = getTableName(source.getConfiguration());
@@ -261,7 +261,7 @@ public class IntegrationTestReplication extends IntegrationTestBigLinkedList {
/**
* Run the {@link org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList.Generator} in the
- * source cluster. This assumes that the tables have been setup via setupTablesAndReplication. n
+ * source cluster. This assumes that the tables have been setup via setupTablesAndReplication.
*/
protected void runGenerator() throws Exception {
Path outputPath = new Path(outputDir);
@@ -282,7 +282,7 @@ public class IntegrationTestReplication extends IntegrationTestBigLinkedList {
* Run the {@link org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList.Verify} in the sink
* cluster. If replication is working properly the data written at the source cluster should be
* available in the sink cluster after a reasonable gap
- * @param expectedNumNodes the number of nodes we are expecting to see in the sink cluster n
+ * @param expectedNumNodes the number of nodes we are expecting to see in the sink cluster
*/
protected void runVerify(long expectedNumNodes) throws Exception {
Path outputPath = new Path(outputDir);
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java
index 60e24be5128..63dc0bb28c8 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java
@@ -36,9 +36,6 @@ public class Driver {
pgd = pgd0;
}
- /**
- * nn
- */
public static void main(String[] args) throws Throwable {
pgd.addClass(RowCounter.NAME, RowCounter.class, "Count rows in HBase table");
ProgramDriver.class.getMethod("driver", new Class[] { String[].class }).invoke(pgd,
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
index 3d609ffd73b..58d8f49839f 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
@@ -74,7 +74,7 @@ public class GroupingTableMap extends MapReduceBase
/**
* Extract the grouping columns from value to construct a new key. Pass the new key and value to
- * reduce. If any of the grouping columns are not found in the value, the record is skipped. nnnnn
+ * reduce. If any of the grouping columns are not found in the value, the record is skipped.
*/
public void map(ImmutableBytesWritable key, Result value,
OutputCollector output, Reporter reporter) throws IOException {
@@ -88,8 +88,8 @@ public class GroupingTableMap extends MapReduceBase
/**
* Extract columns values from the current record. This method returns null if any of the columns
- * are not found. Override this method if you want to deal with nulls differently. n * @return
- * array of byte values
+ * are not found. Override this method if you want to deal with nulls differently.
+ * @return array of byte values
*/
protected byte[][] extractKeyValues(Result r) {
byte[][] keyVals = null;
@@ -115,8 +115,8 @@ public class GroupingTableMap extends MapReduceBase
/**
* Create a key by concatenating multiple column values. Override this function in order to
- * produce different types of keys. n * @return key generated by concatenating multiple column
- * values
+ * produce different types of keys.
+ * @return key generated by concatenating multiple column values
*/
protected ImmutableBytesWritable createGroupKey(byte[][] vals) {
if (vals == null) {
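A sketch of the grouping-key construction described by extractKeyValues and createGroupKey, concatenating the column values with a space delimiter (the delimiter is an assumption for illustration):

```java
static ImmutableBytesWritable groupKey(byte[][] vals) {
  if (vals == null) {
    return null; // a missing grouping column causes the record to be skipped
  }
  StringBuilder sb = new StringBuilder();
  for (int i = 0; i < vals.length; i++) {
    if (i > 0) {
      sb.append(" ");
    }
    sb.append(Bytes.toString(vals[i]));
  }
  return new ImmutableBytesWritable(Bytes.toBytes(sb.toString()));
}
```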
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java
index 16256942d72..8af0b4b4749 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java
@@ -53,7 +53,7 @@ public class IdentityTableMap extends MapReduceBase
}
/**
- * Pass the key, value to reduce nnnnn
+ * Pass the key, value to reduce
*/
public void map(ImmutableBytesWritable key, Result value,
OutputCollector output, Reporter reporter) throws IOException {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
index 79d5f3dc8c0..29f9478da10 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
@@ -38,7 +38,7 @@ public class IdentityTableReduce extends MapReduceBase
private static final Logger LOG = LoggerFactory.getLogger(IdentityTableReduce.class.getName());
/**
- * No aggregation, output pairs of (key, record) nnnnn
+ * No aggregation, output pairs of (key, record)
*/
public void reduce(ImmutableBytesWritable key, Iterator values,
OutputCollector output, Reporter reporter) throws IOException {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java
index 24e9da0f28d..0e9f0deaf67 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java
@@ -105,7 +105,6 @@ public class MultiTableSnapshotInputFormat extends TableSnapshotInputFormat
* restoreDir. Sets:
* {@link org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormatImpl#RESTORE_DIRS_KEY},
* {@link org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormatImpl#SNAPSHOT_TO_SCANS_KEY}
- * nnnn
*/
public static void setInput(Configuration conf, Map<String, Collection<Scan>> snapshotScans,
Path restoreDir) throws IOException {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java
index 4f95950589c..2f6324a7ac5 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java
@@ -65,9 +65,7 @@ public class RowCounter extends Configured implements Tool {
}
}
- /**
- * n * @return the JobConf n
- */
+ /** Returns the JobConf */
public JobConf createSubmittableJob(String[] args) throws IOException {
JobConf c = new JobConf(getConf(), getClass());
c.setJobName(NAME);
@@ -104,9 +102,6 @@ public class RowCounter extends Configured implements Tool {
return 0;
}
- /**
- * nn
- */
public static void main(String[] args) throws Exception {
int errCode = ToolRunner.run(HBaseConfiguration.create(), new RowCounter(), args);
System.exit(errCode);
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
index 34736bd6a3d..667629016d3 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
@@ -210,7 +210,7 @@ public abstract class TableInputFormatBase implements InputFormat {
}
/**
- * Constructor nnnn
+ * Constructor
*/
public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, final String location) {
this.m_tableName = tableName;
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java
index a48ba49058a..8d12fe5d720 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java
@@ -59,7 +59,7 @@ public class CellCreator {
* @param value column value
* @param voffset value offset
* @param vlength value length
- * @return created Cell n
+ * @return created Cell
*/
public Cell create(byte[] row, int roffset, int rlength, byte[] family, int foffset, int flength,
byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset,
@@ -83,7 +83,8 @@ public class CellCreator {
* @param voffset value offset
* @param vlength value length
* @param visExpression visibility expression to be associated with cell
- * @return created Cell n * @deprecated since 0.98.9
+ * @return created Cell
+ * @deprecated since 0.98.9
* @see HBASE-10560
*/
@Deprecated
@@ -111,7 +112,8 @@ public class CellCreator {
* @param timestamp version timestamp
* @param value column value
* @param voffset value offset
- * @param vlength value length n * @return created Cell n
+ * @param vlength value length
+ * @return created Cell
*/
public Cell create(byte[] row, int roffset, int rlength, byte[] family, int foffset, int flength,
byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset,
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
index b41d94fcebb..ccaf55e5025 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
@@ -330,7 +330,7 @@ public class HashTable extends Configured implements Tool {
}
/**
- * Open a TableHash.Reader starting at the first hash at or after the given key. n
+ * Open a TableHash.Reader starting at the first hash at or after the given key.
*/
public Reader newReader(Configuration conf, ImmutableBytesWritable startKey)
throws IOException {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
index 2bf6e6b5a04..0a811c92ba9 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
@@ -148,7 +148,7 @@ public class ImportTsv extends Configured implements Tool {
/**
* @param columnsSpecification the list of columns to parser out, comma separated. The row key
- * should be the special token TsvParser.ROWKEY_COLUMN_SPEC n
+ * should be the special token TsvParser.ROWKEY_COLUMN_SPEC
*/
public TsvParser(String columnsSpecification, String separatorStr) {
// Configure separator
@@ -416,8 +416,8 @@ public class ImportTsv extends Configured implements Tool {
}
/**
- * Return starting position and length of row key from the specified line bytes. nn * @return
- * Pair of row key offset and length. n
+ * Return starting position and length of row key from the specified line bytes.
+ * @return Pair of row key offset and length.
*/
public Pair parseRowKey(byte[] lineBytes, int length)
throws BadTsvLineException {
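A minimal sketch of returning the row key's (offset, length) from a separator-delimited line, assuming for illustration that the row key is the first column:

```java
static Pair<Integer, Integer> rowKeyOffsetAndLength(byte[] lineBytes, int length, byte separator)
    throws BadTsvLineException {
  if (length == 0) {
    throw new BadTsvLineException("Empty line");
  }
  int end = 0;
  while (end < length && lineBytes[end] != separator) {
    end++; // scan forward until the first separator
  }
  return new Pair<>(0, end); // offset of the row key and its length in bytes
}
```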
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java
index fb42e332833..ef3179830f9 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java
@@ -72,7 +72,7 @@ public class MultiTableHFileOutputFormat extends HFileOutputFormat2 {
* function will configure the requisite number of reducers to write HFiles for multiple tables
* simultaneously
* @param job See {@link org.apache.hadoop.mapreduce.Job}
- * @param multiTableDescriptors Table descriptor and region locator pairs n
+ * @param multiTableDescriptors Table descriptor and region locator pairs
*/
public static void configureIncrementalLoad(Job job, List multiTableDescriptors)
throws IOException {
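A hedged usage sketch: each table contributes a (table descriptor, region locator) pair. The table names are placeholders and the TableInfo wrapper is assumed from HFileOutputFormat2:

```java
try (Connection connection = ConnectionFactory.createConnection(conf);
     Admin admin = connection.getAdmin()) {
  List<HFileOutputFormat2.TableInfo> tables = new ArrayList<>();
  for (TableName name : new TableName[] { TableName.valueOf("t1"), TableName.valueOf("t2") }) {
    tables.add(new HFileOutputFormat2.TableInfo(admin.getDescriptor(name),
        connection.getRegionLocator(name)));
  }
  MultiTableHFileOutputFormat.configureIncrementalLoad(job, tables);
}
```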
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
index 5a5d1149755..35c12672dea 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
@@ -76,8 +76,8 @@ public class MultiTableOutputFormat extends OutputFormat<ImmutableBytesWritable, Mutation> {
+ * the HBaseConfiguration to use, and whether to use write ahead logging. Write ahead logging can
+ * be turned off (false) to improve performance when bulk loading data.
*/
public MultiTableRecordWriter(Configuration conf, boolean useWriteAheadLogging)
throws IOException {
@@ -88,8 +88,8 @@ public class MultiTableOutputFormat extends OutputFormat<ImmutableBytesWritable, Mutation> {
public List<TableSnapshotInputFormatImpl.InputSplit> getSplits(Configuration conf)
throws IOException {
@@ -112,7 +112,7 @@ public class MultiTableSnapshotInputFormatImpl {
* Retrieve the snapshot name -> list<scan> mapping pushed to configuration by
* {@link #setSnapshotToScans(Configuration, Map)}
* @param conf Configuration to extract name -> list<scan> mappings from.
- * @return the snapshot name -> list<scan> mapping pushed to configuration n
+ * @return the snapshot name -> list<scan> mapping pushed to configuration
*/
public Map<String, Collection<Scan>> getSnapshotsToScans(Configuration conf) throws IOException {
@@ -136,7 +136,7 @@ public class MultiTableSnapshotInputFormatImpl {
}
/**
- * Push snapshotScans to conf (under the key {@link #SNAPSHOT_TO_SCANS_KEY}) nnn
+ * Push snapshotScans to conf (under the key {@link #SNAPSHOT_TO_SCANS_KEY})
*/
public void setSnapshotToScans(Configuration conf, Map<String, Collection<Scan>> snapshotScans)
throws IOException {
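A self-contained sketch of flattening the snapshot -> scans map into Configuration strings; the configuration key name and the use of TableMapReduceUtil.convertScanToString are assumptions for illustration:

```java
List<String> flattened = new ArrayList<>();
for (Map.Entry<String, Collection<Scan>> entry : snapshotScans.entrySet()) {
  for (Scan scan : entry.getValue()) {
    // each Scan is serialized to a Base64 string so it fits into a flat Configuration value
    flattened.add(entry.getKey() + "=" + TableMapReduceUtil.convertScanToString(scan));
  }
}
conf.setStrings("hbase.mapreduce.multitablesnapshotinputformat.snapshotstoscans",
  flattened.toArray(new String[0]));
```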
@@ -161,7 +161,7 @@ public class MultiTableSnapshotInputFormatImpl {
* Retrieve the directories into which snapshots have been restored from
* ({@link #RESTORE_DIRS_KEY})
* @param conf Configuration to extract restore directories from
- * @return the directories into which snapshots have been restored from n
+ * @return the directories into which snapshots have been restored from
*/
public Map<String, Path> getSnapshotDirs(Configuration conf) throws IOException {
List<Map.Entry<String, String>> kvps = ConfigurationUtil.getKeyValues(conf, RESTORE_DIRS_KEY);
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
index 9228daf4fb4..9a8c4fbb545 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
@@ -248,7 +248,7 @@ public class RowCounter extends AbstractHBaseTool {
* Sets filter {@link FilterBase} to the {@link Scan} instance. If provided rowRangeList contains
* more than one element, method sets filter which is instance of {@link MultiRowRangeFilter}.
* Otherwise, method sets filter which is instance of {@link FirstKeyOnlyFilter}. If rowRangeList
- * contains exactly one element, startRow and stopRow are set to the scan. nn
+ * contains exactly one element, startRow and stopRow are set to the scan.
*/
private static void setScanFilter(Scan scan, List rowRangeList) {
final int size = rowRangeList == null ? 0 : rowRangeList.size();
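A condensed sketch of the selection logic this javadoc describes: zero or one range keeps the cheap FirstKeyOnlyFilter, several ranges switch to MultiRowRangeFilter, and a single range is also applied as the scan's start/stop row:

```java
static void applyRowRanges(Scan scan, List<MultiRowRangeFilter.RowRange> ranges) {
  int size = ranges == null ? 0 : ranges.size();
  if (size <= 1) {
    scan.setFilter(new FirstKeyOnlyFilter());
  } else {
    scan.setFilter(new MultiRowRangeFilter(ranges));
  }
  if (size == 1) {
    MultiRowRangeFilter.RowRange range = ranges.get(0);
    scan.withStartRow(range.getStartRow()); // inclusive start
    scan.withStopRow(range.getStopRow());   // exclusive stop
  }
}
```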
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
index efd872263b1..7d172375c10 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
@@ -362,8 +362,7 @@ public abstract class TableInputFormatBase extends InputFormat<ImmutableBytesWritable, Result> {
protected List<InputSplit> createNInputSplitsUniform(InputSplit split, int n)
throws IllegalArgumentIOException {
@@ -581,7 +580,7 @@ public abstract class TableInputFormatBase extends InputFormat<ImmutableBytesWritable, Result> {
private static Path findOrCreateJar(Class<?> my_class, FileSystem fs,
Map<String, String> packagedClasses) throws IOException {
@@ -897,7 +897,7 @@ public class TableMapReduceUtil {
* that is not the first thing on the class path that has a class with the same name. Looks first
* on the classpath and then in the packagedClasses map.
* @param my_class the class to find.
- * @return a jar file that contains the class, or null. n
+ * @return a jar file that contains the class, or null.
*/
private static String findContainingJar(Class> my_class, Map packagedClasses)
throws IOException {
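A hedged sketch of the classic classloader-based jar lookup this helper performs (not the exact TableMapReduceUtil code):

```java
static String containingJar(Class<?> clazz) throws IOException {
  String classFile = clazz.getName().replace('.', '/') + ".class";
  URL url = clazz.getClassLoader().getResource(classFile);
  if (url == null || !"jar".equals(url.getProtocol())) {
    return null; // the class is not being loaded out of a jar
  }
  String path = url.getPath(); // e.g. file:/path/to/lib.jar!/com/Foo.class
  path = path.substring(0, path.indexOf('!'));
  if (path.startsWith("file:")) {
    path = path.substring("file:".length());
  }
  return URLDecoder.decode(path, "UTF-8");
}
```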
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
index e8316c5016f..17c6c0e4551 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
@@ -90,7 +90,8 @@ public class TableOutputFormat<KEY> extends OutputFormat<KEY, Mutation> implements Configurable {
private BufferedMutator mutator;
/**
- * n *
+ *
+ *
*/
public TableRecordWriter() throws IOException {
String tableName = conf.get(OUTPUT_TABLE);
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java
index a0df98796b4..6b22ad1bb0f 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java
@@ -70,7 +70,8 @@ public class TableRecordReader extends RecordReader<ImmutableBytesWritable, Result> {
* doSetup. Hence a
* subclass may choose to override this method and call doSetup as well before
- * handling it's own custom params. n
+ * handling its own custom params.
*/
@Override
protected void setup(Context context) {
@@ -107,7 +107,7 @@ public class TextSortReducer
}
/**
- * Handles common parameter initialization that a subclass might want to leverage. nn
+ * Handles common parameter initialization that a subclass might want to leverage.
*/
protected void doSetup(Context context, Configuration conf) {
// If a custom separator has been used,
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
index b6c4e814113..4cdb918bdb0 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
@@ -96,7 +96,7 @@ public class TsvImporterMapper extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put> {
* doSetup. Hence a
* subclass may choose to override this method and call doSetup as well before
- * handling it's own custom params. n
+ * handling its own custom params.
*/
@Override
protected void setup(Context context) {
@@ -112,7 +112,7 @@ public class TsvImporterMapper extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put> {
* doSetup. Hence a
* subclass may choose to override this method and call doSetup as well before
- * handling it's own custom params. n
+ * handling its own custom params.
*/
@Override
protected void setup(Context context) {
@@ -79,7 +79,7 @@ public class TsvImporterTextMapper
}
/**
- * Handles common parameter initialization that a subclass might want to leverage. n
+ * Handles common parameter initialization that a subclass might want to leverage.
*/
protected void doSetup(Context context) {
Configuration conf = context.getConfiguration();
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 93d439f1608..8fd2d5f7fb2 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -265,7 +265,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
interface Status {
/**
* Sets status
- * @param msg status message n
+ * @param msg status message
*/
void setStatus(final String msg) throws IOException;
}
@@ -549,7 +549,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
/*
* Run a mapreduce job. Run as many maps as asked-for clients. Before we start up the job, write
* out an input file with instruction per client regards which row they are to start on.
- * @param cmd Command to run. n
+ * @param cmd Command to run.
*/
static Job doMapReduce(TestOptions opts, final Configuration conf)
throws IOException, InterruptedException, ClassNotFoundException {
@@ -600,7 +600,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
/*
* Write input file of offsets-per-client for the mapreduce job.
* @param c Configuration
- * @return Directory that contains file written whose name is JOB_INPUT_FILENAME n
+ * @return Directory that contains file written whose name is JOB_INPUT_FILENAME
*/
static Path writeInputFile(final Configuration c, final TestOptions opts) throws IOException {
return writeInputFile(c, opts, new Path("."));
@@ -1354,7 +1354,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
/*
* Run test
- * @return Elapsed time. n
+ * @return Elapsed time.
*/
long test() throws IOException, InterruptedException {
testSetup();
@@ -2448,8 +2448,9 @@ public class PerformanceEvaluation extends Configured implements Tool {
}
/*
- * Format passed integer. n * @return Returns zero-prefixed ROW_LENGTH-byte wide decimal version
- * of passed number (Does absolute in case number is negative).
+ * Format passed integer.
+ * @return Returns zero-prefixed ROW_LENGTH-byte wide decimal version of passed number (Does
+ * absolute in case number is negative).
*/
public static byte[] format(final int number) {
byte[] b = new byte[ROW_LENGTH];
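A sketch of the zero-padded fixed-width formatting described here; the 26-byte ROW_LENGTH is an assumption used only for illustration:

```java
public static byte[] formatRow(final int number) {
  final int rowLength = 26; // assumed ROW_LENGTH
  byte[] b = new byte[rowLength];
  int d = Math.abs(number);
  for (int i = b.length - 1; i >= 0; i--) {
    b[i] = (byte) ((d % 10) + '0'); // least-significant digit goes in the last slot
    d /= 10;
  }
  return b;
}
```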
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java
index 7d6dc6e46b7..0ec4c2ffa28 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java
@@ -110,15 +110,15 @@ public class TestTableInputFormat {
/**
* Setup a table with two rows and values.
* @param tableName the name of the table to create
- * @return A Table instance for the created table. n
+ * @return A Table instance for the created table.
*/
public static Table createTable(byte[] tableName) throws IOException {
return createTable(tableName, new byte[][] { FAMILY });
}
/**
- * Setup a table with two rows and values per column family. n * @return A Table instance for the
- * created table. n
+ * Setup a table with two rows and values per column family.
+ * @return A Table instance for the created table.
*/
public static Table createTable(byte[] tableName, byte[][] families) throws IOException {
Table table = UTIL.createTable(TableName.valueOf(tableName), families);
@@ -153,7 +153,7 @@ public class TestTableInputFormat {
}
/**
- * Create table data and run tests on specified htable using the o.a.h.hbase.mapred API. nn
+ * Create table data and run tests on specified htable using the o.a.h.hbase.mapred API.
*/
static void runTestMapred(Table table) throws IOException {
org.apache.hadoop.hbase.mapred.TableRecordReader trr =
@@ -181,7 +181,7 @@ public class TestTableInputFormat {
}
/**
- * Create a table that IOE's on first scanner next call n
+ * Create a table that IOE's on first scanner next call
*/
static Table createIOEScannerTable(byte[] name, final int failCnt) throws IOException {
// build up a mock scanner stuff to fail the first time
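A hedged Mockito sketch of the "fail the first time" scanner being built here; result stands in for a previously prepared Result stub:

```java
ResultScanner scanner = mock(ResultScanner.class);
when(scanner.next())
    .thenThrow(new IOException("injected first-call failure")) // first next() fails
    .thenReturn(result, (Result) null);                        // then a row, then end-of-scan
Table table = mock(Table.class);
when(table.getScanner(any(Scan.class))).thenReturn(scanner);
```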
@@ -212,7 +212,7 @@ public class TestTableInputFormat {
}
/**
- * Create a table that throws a DoNoRetryIOException on first scanner next call n
+ * Create a table that throws a DoNotRetryIOException on first scanner next call
*/
static Table createDNRIOEScannerTable(byte[] name, final int failCnt) throws IOException {
// build up a mock scanner stuff to fail the first time
@@ -245,7 +245,7 @@ public class TestTableInputFormat {
}
/**
- * Run test assuming no errors using mapred api. n
+ * Run test assuming no errors using mapred api.
*/
@Test
public void testTableRecordReader() throws IOException {
@@ -254,7 +254,7 @@ public class TestTableInputFormat {
}
/**
- * Run test assuming Scanner IOException failure using mapred api, n
+ * Run test assuming Scanner IOException failure using mapred api.
*/
@Test
public void testTableRecordReaderScannerFail() throws IOException {
@@ -263,7 +263,7 @@ public class TestTableInputFormat {
}
/**
- * Run test assuming Scanner IOException failure using mapred api, n
+ * Run test assuming Scanner IOException failure using mapred api.
*/
@Test(expected = IOException.class)
public void testTableRecordReaderScannerFailTwice() throws IOException {
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java
index 12a5650c981..0e7ff24a1da 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java
@@ -201,7 +201,7 @@ public abstract class MultiTableInputFormatTestBase {
}
/**
- * Tests a MR scan using specific start and stop rows. nnn
+ * Tests a MR scan using specific start and stop rows.
*/
private void testScan(String start, String stop, String last)
throws IOException, InterruptedException, ClassNotFoundException {
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index 54d171659d0..51e9e1e7755 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -822,7 +822,7 @@ public class TestHFileOutputFormat2 {
/**
* Test for {@link HFileOutputFormat2#createFamilyCompressionMap(Configuration)}. Tests that the
- * family compression map is correctly serialized into and deserialized from configuration n
+ * family compression map is correctly serialized into and deserialized from configuration
*/
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
@Test
@@ -888,7 +888,7 @@ public class TestHFileOutputFormat2 {
/**
* Test for {@link HFileOutputFormat2#createFamilyBloomTypeMap(Configuration)}. Tests that the
- * family bloom type map is correctly serialized into and deserialized from configuration n
+ * family bloom type map is correctly serialized into and deserialized from configuration
*/
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
@Test
@@ -949,7 +949,7 @@ public class TestHFileOutputFormat2 {
/**
* Test for {@link HFileOutputFormat2#createFamilyBlockSizeMap(Configuration)}. Tests that the
- * family block size map is correctly serialized into and deserialized from configuration n
+ * family block size map is correctly serialized into and deserialized from configuration
*/
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
@Test
@@ -1014,7 +1014,7 @@ public class TestHFileOutputFormat2 {
/**
* Test for {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap(Configuration)}. Tests that
* the family data block encoding map is correctly serialized into and deserialized from
- * configuration n
+ * configuration
*/
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
@Test
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
index 57ecb5aefa1..86bf3220658 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
@@ -173,8 +173,8 @@ public class TestImportExport {
}
/**
- * Runs an export job with the specified command line args n * @return true if job completed
- * successfully nnn
+ * Runs an export job with the specified command line args
+ * @return true if job completed successfully
*/
protected boolean runExport(String[] args) throws Throwable {
// need to make a copy of the configuration because to make sure different temp dirs are used.
@@ -187,8 +187,8 @@ public class TestImportExport {
}
/**
- * Runs an import job with the specified command line args n * @return true if job completed
- * successfully nnn
+ * Runs an import job with the specified command line args
+ * @return true if job completed successfully
*/
boolean runImport(String[] args) throws Throwable {
// need to make a copy of the configuration because to make sure different temp dirs are used.
@@ -197,7 +197,7 @@ public class TestImportExport {
}
/**
- * Test simple replication case with column mapping n
+ * Test simple replication case with column mapping
*/
@Test
public void testSimpleCase() throws Throwable {
@@ -249,7 +249,7 @@ public class TestImportExport {
}
/**
- * Test export hbase:meta table n
+ * Test export hbase:meta table
*/
@Test
public void testMetaExport() throws Throwable {
@@ -259,7 +259,7 @@ public class TestImportExport {
}
/**
- * Test import data from 0.94 exported file n
+ * Test import data from 0.94 exported file
*/
@Test
public void testImport94Table() throws Throwable {
@@ -508,7 +508,7 @@ public class TestImportExport {
/**
* Count the number of keyvalues in the specified table with the given filter
* @param table the table to scan
- * @return the number of keyvalues found n
+ * @return the number of keyvalues found
*/
private int getCount(Table table, Filter filter) throws IOException {
Scan scan = new Scan();
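A sketch of counting cells under a filtered scan, matching the helper described above:

```java
static int countCells(Table table, Filter filter) throws IOException {
  Scan scan = new Scan();
  if (filter != null) {
    scan.setFilter(filter);
  }
  int count = 0;
  try (ResultScanner scanner = table.getScanner(scan)) {
    for (Result r : scanner) {
      count += r.size(); // number of cells in this row
    }
  }
  return count;
}
```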
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java
index b73cc7e1abb..a7b977620a1 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java
@@ -154,9 +154,9 @@ public class TestImportTSVWithOperationAttributes implements Configurable {
/**
* Run an ImportTsv job and perform basic validation on the results. Returns the ImportTsv
* Tool instance so that other tests can inspect it for further validation as
- * necessary. This method is static to insure non-reliance on instance's util/conf facilities. n *
- * Any arguments to pass BEFORE inputFile path is appended. n * @return The Tool instance used to
- * run the test.
+ * necessary. This method is static to ensure non-reliance on instance's util/conf facilities. The
+ * args parameter lists any arguments to pass BEFORE the inputFile path is appended.
+ * @return The Tool instance used to run the test.
*/
private Tool doMROnTableTest(HBaseTestingUtil util, String family, String data, String[] args,
int valueMultiplier, boolean dataAvailable) throws Exception {
@@ -193,7 +193,7 @@ public class TestImportTSVWithOperationAttributes implements Configurable {
}
/**
- * Confirm ImportTsv via data in online table. n
+ * Confirm ImportTsv via data in online table.
*/
private static void validateTable(Configuration conf, TableName tableName, String family,
int valueMultiplier, boolean dataAvailable) throws IOException {
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java
index cae349ce05d..e15181e9c94 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java
@@ -316,8 +316,8 @@ public class TestImportTSVWithVisibilityLabels implements Configurable {
/**
* Run an ImportTsv job and perform basic validation on the results. Returns the ImportTsv
* Tool instance so that other tests can inspect it for further validation as
- * necessary. This method is static to insure non-reliance on instance's util/conf facilities. n *
- * Any arguments to pass BEFORE inputFile path is appended.
+ * necessary. This method is static to ensure non-reliance on instance's util/conf facilities. The
+ * args parameter lists any arguments to pass BEFORE the inputFile path is appended.
* @param expectedKVCount Expected KV count. pass -1 to skip the kvcount check
* @return The Tool instance used to run the test.
*/
@@ -461,7 +461,7 @@ public class TestImportTSVWithVisibilityLabels implements Configurable {
* Method returns the total KVs in given hfile
* @param fs File System
* @param p HFile path
- * @return KV count in the given hfile n
+ * @return KV count in the given hfile
*/
private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException {
Configuration conf = util.getConfiguration();
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
index 83634742b28..9316b09b8c9 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
@@ -540,7 +540,7 @@ public class TestImportTsv implements Configurable {
* Method returns the total KVs in given hfile
* @param fs File System
* @param p HFile path
- * @return KV count in the given hfile n
+ * @return KV count in the given hfile
*/
private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException {
Configuration conf = util.getConfiguration();
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java
index bfccff65c66..7d099aa44e2 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java
@@ -80,7 +80,7 @@ public class TestMultiTableInputFormatBase {
/**
* Test getSplits only puts up one Connection. In past it has put up many Connections. Each
* Connection setup comes with a fresh new cache so we have to do fresh hit on hbase:meta. Should
- * only do one Connection when doing getSplits even if a MultiTableInputFormat. n
+ * only do one Connection when doing getSplits even if a MultiTableInputFormat.
*/
@Test
public void testMRSplitsConnectionCount() throws IOException {
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java
index 3db7fa7ef0b..f5f0fdf169a 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java
@@ -93,7 +93,7 @@ public class TestMultithreadedTableMapper {
public static class ProcessContentsMapper extends TableMapper {
/**
- * Pass the key, and reversed value to reduce nnnn
+ * Pass the key, and reversed value to reduce
*/
@Override
public void map(ImmutableBytesWritable key, Result value, Context context)
@@ -118,7 +118,7 @@ public class TestMultithreadedTableMapper {
}
/**
- * Test multithreadedTableMappper map/reduce against a multi-region table nnn
+ * Test MultithreadedTableMapper map/reduce against a multi-region table
*/
@Test
public void testMultithreadedTableMapper()
@@ -184,7 +184,8 @@ public class TestMultithreadedTableMapper {
/**
* Looks at every value of the mapreduce output and verifies that indeed the values have been
* reversed.
- * @param table Table to scan. n * @throws NullPointerException if we failed to find a cell value
+ * @param table Table to scan.
+ * @throws NullPointerException if we failed to find a cell value
*/
private void verifyAttempt(final Table table) throws IOException, NullPointerException {
Scan scan = new Scan();
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java
index 49daac88e37..8f15fb1c170 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java
@@ -87,7 +87,7 @@ public class TestRowCounter {
}
/**
- * Test a case when no column was specified in command line arguments. n
+ * Test a case when no column was specified in command line arguments.
*/
@Test
public void testRowCounterNoColumn() throws Exception {
@@ -96,7 +96,7 @@ public class TestRowCounter {
}
/**
- * Test a case when the column specified in command line arguments is exclusive for few rows. n
+ * Test a case when the column specified in command line arguments is exclusive for few rows.
*/
@Test
public void testRowCounterExclusiveColumn() throws Exception {
@@ -106,7 +106,7 @@ public class TestRowCounter {
/**
* Test a case when the column specified in command line arguments is one for which the qualifier
- * contains colons. n
+ * contains colons.
*/
@Test
public void testRowCounterColumnWithColonInQualifier() throws Exception {
@@ -116,7 +116,7 @@ public class TestRowCounter {
/**
* Test a case when the column specified in command line arguments is not part of first KV for a
- * row. n
+ * row.
*/
@Test
public void testRowCounterHiddenColumn() throws Exception {
@@ -126,7 +126,7 @@ public class TestRowCounter {
/**
* Test a case when the column specified in command line arguments is exclusive for few rows and
- * also a row range filter is specified n
+ * also a row range filter is specified
*/
@Test
public void testRowCounterColumnAndRowRange() throws Exception {
@@ -135,7 +135,7 @@ public class TestRowCounter {
}
/**
- * Test a case when a range is specified with single range of start-end keys n
+ * Test a case when a range is specified with single range of start-end keys
*/
@Test
public void testRowCounterRowSingleRange() throws Exception {
@@ -144,7 +144,7 @@ public class TestRowCounter {
}
/**
- * Test a case when a range is specified with single range with end key only n
+ * Test a case when a range is specified with single range with end key only
*/
@Test
public void testRowCounterRowSingleRangeUpperBound() throws Exception {
@@ -153,7 +153,7 @@ public class TestRowCounter {
}
/**
- * Test a case when a range is specified with two ranges where one range is with end key only n
+ * Test a case when a range is specified with two ranges where one range is with end key only
*/
@Test
public void testRowCounterRowMultiRangeUpperBound() throws Exception {
@@ -162,7 +162,7 @@ public class TestRowCounter {
}
/**
- * Test a case when a range is specified with multiple ranges of start-end keys n
+ * Test a case when a range is specified with multiple ranges of start-end keys
*/
@Test
public void testRowCounterRowMultiRange() throws Exception {
@@ -172,7 +172,7 @@ public class TestRowCounter {
/**
* Test a case when a range is specified with multiple ranges of start-end keys; one range is
- * filled, another two are not n
+ * filled, another two are not
*/
@Test
public void testRowCounterRowMultiEmptyRange() throws Exception {
@@ -193,7 +193,7 @@ public class TestRowCounter {
}
/**
- * Test a case when the timerange is specified with --starttime and --endtime options n
+ * Test a case when the timerange is specified with --starttime and --endtime options
*/
@Test
public void testRowCounterTimeRange() throws Exception {
@@ -241,7 +241,7 @@ public class TestRowCounter {
/**
* Run the RowCounter map reduce job and verify the row count.
* @param args the command line arguments to be used for rowcounter job.
- * @param expectedCount the expected row count (result of map reduce job). n
+ * @param expectedCount the expected row count (result of map reduce job).
*/
private void runRowCount(String[] args, int expectedCount) throws Exception {
RowCounter rowCounter = new RowCounter();
@@ -433,7 +433,7 @@ public class TestRowCounter {
/**
* Writes TOTAL_ROWS number of distinct rows in to the table. Few rows have two columns, Few have
- * one. nn
+ * one.
*/
private static void writeRows(Table table, int totalRows, int rowsWithOneCol) throws IOException {
final byte[] family = Bytes.toBytes(COL_FAM);
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java
index bf1a7439b4e..ca0b9df79d3 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java
@@ -105,15 +105,16 @@ public class TestTableInputFormat {
}
/**
- * Setup a table with two rows and values. n * @return A Table instance for the created table. n
+ * Setup a table with two rows and values.
+ * @return A Table instance for the created table.
*/
public static Table createTable(byte[] tableName) throws IOException {
return createTable(tableName, new byte[][] { FAMILY });
}
/**
- * Setup a table with two rows and values per column family. n * @return A Table instance for the
- * created table. n
+ * Setup a table with two rows and values per column family.
+ * @return A Table instance for the created table.
*/
public static Table createTable(byte[] tableName, byte[][] families) throws IOException {
Table table = UTIL.createTable(TableName.valueOf(tableName), families);
@@ -148,7 +149,7 @@ public class TestTableInputFormat {
}
/**
- * Create table data and run tests on specified htable using the o.a.h.hbase.mapreduce API. nnn
+ * Create table data and run tests on specified htable using the o.a.h.hbase.mapreduce API.
*/
static void runTestMapreduce(Table table) throws IOException, InterruptedException {
org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl trr =
@@ -182,7 +183,7 @@ public class TestTableInputFormat {
}
/**
- * Create a table that IOE's on first scanner next call n
+ * Create a table that IOE's on first scanner next call
*/
static Table createIOEScannerTable(byte[] name, final int failCnt) throws IOException {
// build up a mock scanner stuff to fail the first time
@@ -213,7 +214,7 @@ public class TestTableInputFormat {
}
/**
- * Create a table that throws a NotServingRegionException on first scanner next call n
+ * Create a table that throws a NotServingRegionException on first scanner next call
*/
static Table createDNRIOEScannerTable(byte[] name, final int failCnt) throws IOException {
// build up a mock scanner stuff to fail the first time
@@ -246,7 +247,7 @@ public class TestTableInputFormat {
}
/**
- * Run test assuming no errors using newer mapreduce api nn
+ * Run test assuming no errors using newer mapreduce api
*/
@Test
public void testTableRecordReaderMapreduce() throws IOException, InterruptedException {
@@ -255,7 +256,7 @@ public class TestTableInputFormat {
}
/**
- * Run test assuming Scanner IOException failure using newer mapreduce api nn
+ * Run test assuming Scanner IOException failure using newer mapreduce api
*/
@Test
public void testTableRecordReaderScannerFailMapreduce() throws IOException, InterruptedException {
@@ -264,7 +265,7 @@ public class TestTableInputFormat {
}
/**
- * Run test assuming Scanner IOException failure using newer mapreduce api nn
+ * Run test assuming Scanner IOException failure using newer mapreduce api
*/
@Test(expected = IOException.class)
public void testTableRecordReaderScannerFailMapreduceTwice()
@@ -274,8 +275,7 @@ public class TestTableInputFormat {
}
/**
- * Run test assuming NotServingRegionException using newer mapreduce api n * @throws
- * org.apache.hadoop.hbase.DoNotRetryIOException
+ * Run test assuming NotServingRegionException using newer mapreduce api
*/
@Test
public void testTableRecordReaderScannerTimeoutMapreduce()
@@ -285,8 +285,7 @@ public class TestTableInputFormat {
}
/**
- * Run test assuming NotServingRegionException using newer mapreduce api n * @throws
- * org.apache.hadoop.hbase.NotServingRegionException
+ * Run test assuming NotServingRegionException using newer mapreduce api
*/
@Test(expected = org.apache.hadoop.hbase.NotServingRegionException.class)
public void testTableRecordReaderScannerTimeoutMapreduceTwice()
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java
index e1bd1626870..99606050667 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java
@@ -76,7 +76,7 @@ public class TestTableMapReduce extends TestTableMapReduceBase {
static class ProcessContentsMapper extends TableMapper {
/**
- * Pass the key, and reversed value to reduce nnnn
+ * Pass the key, and reversed value to reduce
*/
@Override
public void map(ImmutableBytesWritable key, Result value, Context context)
@@ -136,7 +136,7 @@ public class TestTableMapReduce extends TestTableMapReduceBase {
}
/**
- * Verify scan counters are emitted from the job nn
+ * Verify scan counters are emitted from the job
*/
private void verifyJobCountersAreEmitted(Job job) throws IOException {
Counters counters = job.getCounters();
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java
index 7490587b109..477ea5d7f6d 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java
@@ -83,7 +83,7 @@ public abstract class TestTableMapReduceBase {
}
/**
- * Test a map/reduce against a multi-region table n
+ * Test a map/reduce against a multi-region table
*/
@Test
public void testMultiRegionTable() throws IOException {
@@ -152,7 +152,8 @@ public abstract class TestTableMapReduceBase {
/**
* Looks at every value of the mapreduce output and verifies that indeed the values have been
* reversed.
- * @param table Table to scan. n * @throws NullPointerException if we failed to find a cell value
+ * @param table Table to scan.
+ * @throws NullPointerException if we failed to find a cell value
*/
private void verifyAttempt(final Table table) throws IOException, NullPointerException {
Scan scan = new Scan();
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
index 7fda7422023..0c518de221f 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
@@ -148,8 +148,7 @@ public class ProcedureWALPrettyPrinter extends Configured implements Tool {
/**
* Pass one or more log file names and formatting options and it will dump out a text version of
- * the contents on stdout . n * Command line arguments n * Thrown upon file system
- * errors etc.
+ * the contents on stdout, given the command line arguments. An IOException is thrown upon file
+ * system errors etc.
*/
@Override
public int run(final String[] args) throws IOException {
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java
index 61dede2ae83..47852f4df2b 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java
@@ -41,7 +41,7 @@ public class ExistsResource extends ResourceBase {
TableResource tableResource;
/**
- * Constructor nn
+ * Constructor
*/
public ExistsResource(TableResource tableResource) throws IOException {
super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
index 68d774e420c..cc5fb22265c 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
@@ -44,7 +44,7 @@ public class MultiRowResource extends ResourceBase implements Constants {
String[] columns = null;
/**
- * Constructor nn * @throws java.io.IOException
+ * Constructor
*/
public MultiRowResource(TableResource tableResource, String versions, String columnsStr)
throws IOException {
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java
index e1282c493ab..e27ee6ddb91 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java
@@ -62,14 +62,14 @@ public class NamespacesInstanceResource extends ResourceBase {
boolean queryTables = false;
/**
- * Constructor for standard NamespaceInstanceResource. n
+ * Constructor for standard NamespaceInstanceResource.
*/
public NamespacesInstanceResource(String namespace) throws IOException {
this(namespace, false);
}
/**
- * Constructor for querying namespace table list via NamespaceInstanceResource. n
+ * Constructor for querying namespace table list via NamespaceInstanceResource.
*/
public NamespacesInstanceResource(String namespace, boolean queryTables) throws IOException {
super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java
index a3c0e2d2f1a..aeccda24f19 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java
@@ -44,7 +44,7 @@ public class NamespacesResource extends ResourceBase {
private static final Logger LOG = LoggerFactory.getLogger(NamespacesResource.class);
/**
- * Constructor n
+ * Constructor
*/
public NamespacesResource() throws IOException {
super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java
index 39a7ba71dd6..2e01ff24d47 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java
@@ -32,7 +32,7 @@ public interface ProtobufMessageHandler {
/**
* Initialize the model from a protobuf representation.
* @param message the raw bytes of the protobuf message
- * @return reference to self for convenience n
+ * @return reference to self for convenience
*/
ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException;
}
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java
index 79760aead9d..7212993fb8d 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java
@@ -90,7 +90,7 @@ public class RESTServlet implements Constants {
/**
* Constructor with existing configuration
* @param conf existing configuration
- * @param userProvider the login user provider n
+ * @param userProvider the login user provider
*/
RESTServlet(final Configuration conf, final UserProvider userProvider) throws IOException {
this.realUser = userProvider.getCurrent().getUGI();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
index 21c97302603..17beae40f7b 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
@@ -55,7 +55,7 @@ public class RegionsResource extends ResourceBase {
TableResource tableResource;
/**
- * Constructor nn
+ * Constructor
*/
public RegionsResource(TableResource tableResource) throws IOException {
super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java
index 9baf7aa7c04..babb3d1152c 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java
@@ -48,7 +48,7 @@ public class RootResource extends ResourceBase {
}
/**
- * Constructor n
+ * Constructor
*/
public RootResource() throws IOException {
super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
index cfd63aa2d1c..b599b0b1949 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
@@ -69,7 +69,7 @@ public class RowResource extends ResourceBase {
private boolean returnResult = false;
/**
- * Constructor nnnnnn
+ * Constructor
*/
public RowResource(TableResource tableResource, String rowspec, String versions, String check,
String returnResult) throws IOException {
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
index f5606bb25d7..49801b4f7d8 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
@@ -52,7 +52,7 @@ public class ScannerResource extends ResourceBase {
TableResource tableResource;
/**
- * Constructor nn
+ * Constructor
*/
public ScannerResource(TableResource tableResource) throws IOException {
super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
index 8348b79985c..958a1288d4f 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
@@ -51,7 +51,7 @@ public class StorageClusterStatusResource extends ResourceBase {
}
/**
- * Constructor n
+ * Constructor
*/
public StorageClusterStatusResource() throws IOException {
super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
index ea7641e54cd..00c243aec72 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
@@ -45,7 +45,7 @@ public class StorageClusterVersionResource extends ResourceBase {
}
/**
- * Constructor n
+ * Constructor
*/
public StorageClusterVersionResource() throws IOException {
super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
index 2fe26deb542..dbac4686520 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
@@ -46,7 +46,7 @@ public class TableResource extends ResourceBase {
private static final Logger LOG = LoggerFactory.getLogger(TableResource.class);
/**
- * Constructor nn
+ * Constructor
*/
public TableResource(String table) throws IOException {
super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
index 8b71f708645..d78ba90cd8c 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
@@ -53,7 +53,7 @@ public class VersionResource extends ResourceBase {
}
/**
- * Constructor n
+ * Constructor
*/
public VersionResource() throws IOException {
super();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
index 85cb2af86a8..3f406fb5d92 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
@@ -255,7 +255,7 @@ public class Client {
* @param method the transaction method
* @param headers HTTP header values to send
* @param path the properly urlencoded path
- * @return the HTTP response code n
+ * @return the HTTP response
*/
public HttpResponse executePathOnly(Cluster cluster, HttpUriRequest method, Header[] headers,
String path) throws IOException {
@@ -309,7 +309,7 @@ public class Client {
* @param method the transaction method
* @param headers HTTP header values to send
* @param uri a properly urlencoded URI
- * @return the HTTP response code n
+ * @return the HTTP response
*/
public HttpResponse executeURI(HttpUriRequest method, Header[] headers, String uri)
throws IOException {
@@ -348,7 +348,7 @@ public class Client {
* @param method the HTTP method
* @param headers HTTP header values to send
* @param path the properly urlencoded path or URI
- * @return the HTTP response code n
+ * @return the HTTP response
*/
public HttpResponse execute(Cluster cluster, HttpUriRequest method, Header[] headers, String path)
throws IOException {
@@ -407,7 +407,7 @@ public class Client {
/**
* Send a HEAD request
* @param path the path or URI
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response head(String path) throws IOException {
return head(cluster, path, null);
@@ -418,7 +418,7 @@ public class Client {
* @param cluster the cluster definition
* @param path the path or URI
* @param headers the HTTP headers to include in the request
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response head(Cluster cluster, String path, Header[] headers) throws IOException {
HttpHead method = new HttpHead(path);
@@ -433,7 +433,7 @@ public class Client {
/**
* Send a GET request
* @param path the path or URI
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response get(String path) throws IOException {
return get(cluster, path);
@@ -443,7 +443,7 @@ public class Client {
* Send a GET request
* @param cluster the cluster definition
* @param path the path or URI
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response get(Cluster cluster, String path) throws IOException {
return get(cluster, path, EMPTY_HEADER_ARRAY);
@@ -453,7 +453,7 @@ public class Client {
* Send a GET request
* @param path the path or URI
* @param accept Accept header value
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response get(String path, String accept) throws IOException {
return get(cluster, path, accept);
@@ -464,7 +464,7 @@ public class Client {
* @param cluster the cluster definition
* @param path the path or URI
* @param accept Accept header value
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response get(Cluster cluster, String path, String accept) throws IOException {
Header[] headers = new Header[1];
@@ -476,7 +476,7 @@ public class Client {
* Send a GET request
* @param path the path or URI
* @param headers the HTTP headers to include in the request, Accept must be supplied
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response get(String path, Header[] headers) throws IOException {
return get(cluster, path, headers);
@@ -522,7 +522,7 @@ public class Client {
* @param c the cluster definition
* @param path the path or URI
* @param headers the HTTP headers to include in the request
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response get(Cluster c, String path, Header[] headers) throws IOException {
if (httpGet != null) {
@@ -539,7 +539,7 @@ public class Client {
* @param path the path or URI
* @param contentType the content MIME type
* @param content the content bytes
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response put(String path, String contentType, byte[] content) throws IOException {
return put(cluster, path, contentType, content);
@@ -551,7 +551,7 @@ public class Client {
* @param contentType the content MIME type
* @param content the content bytes
* @param extraHdr extra Header to send
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response put(String path, String contentType, byte[] content, Header extraHdr)
throws IOException {
@@ -600,7 +600,7 @@ public class Client {
* @param path the path or URI
* @param headers the HTTP headers to include, Content-Type must be supplied
* @param content the content bytes
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response put(String path, Header[] headers, byte[] content) throws IOException {
return put(cluster, path, headers, content);
@@ -612,7 +612,7 @@ public class Client {
* @param path the path or URI
* @param headers the HTTP headers to include, Content-Type must be supplied
* @param content the content bytes
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response put(Cluster cluster, String path, Header[] headers, byte[] content)
throws IOException {
@@ -633,7 +633,7 @@ public class Client {
* @param path the path or URI
* @param contentType the content MIME type
* @param content the content bytes
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response post(String path, String contentType, byte[] content) throws IOException {
return post(cluster, path, contentType, content);
@@ -645,7 +645,7 @@ public class Client {
* @param contentType the content MIME type
* @param content the content bytes
* @param extraHdr additional Header to send
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response post(String path, String contentType, byte[] content, Header extraHdr)
throws IOException {
@@ -694,7 +694,7 @@ public class Client {
* @param path the path or URI
* @param headers the HTTP headers to include, Content-Type must be supplied
* @param content the content bytes
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response post(String path, Header[] headers, byte[] content) throws IOException {
return post(cluster, path, headers, content);
@@ -706,7 +706,7 @@ public class Client {
* @param path the path or URI
* @param headers the HTTP headers to include, Content-Type must be supplied
* @param content the content bytes
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response post(Cluster cluster, String path, Header[] headers, byte[] content)
throws IOException {
@@ -725,7 +725,7 @@ public class Client {
/**
* Send a DELETE request
* @param path the path or URI
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response delete(String path) throws IOException {
return delete(cluster, path);
@@ -735,7 +735,7 @@ public class Client {
* Send a DELETE request
* @param path the path or URI
* @param extraHdr additional Header to send
- * @return a Response object with response detail n
+ * @return a Response object with response detail
*/
public Response delete(String path, Header extraHdr) throws IOException {
return delete(cluster, path, extraHdr);
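The head/get/put/post/delete helpers documented above all hand back a Response carrying the status code and body. A minimal usage sketch, assuming a REST gateway at a placeholder address and purely illustrative table/row paths.

import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;
import org.apache.hadoop.hbase.util.Bytes;

public class RestClientSketch {
  public static void main(String[] args) throws Exception {
    Cluster gateway = new Cluster();
    gateway.add("localhost", 8080); // placeholder REST gateway endpoint
    Client client = new Client(gateway);

    // GET with an explicit Accept header, per get(path, accept) above.
    Response version = client.get("/version/cluster", "text/plain");
    System.out.println(version.getCode() + ": " + Bytes.toString(version.getBody()));

    // PUT raw cell content with a Content-Type, per put(path, contentType, content) above.
    Response put = client.put("/mytable/row1/cf:q", "application/octet-stream",
      Bytes.toBytes("value"));
    System.out.println("PUT -> " + put.getCode());

    // DELETE a row, per delete(path) above.
    Response delete = client.delete("/mytable/row1");
    System.out.println("DELETE -> " + delete.getCode());
  }
}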
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java
index 47e67dbea5a..9071c31614c 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java
@@ -139,7 +139,7 @@ public class RestCsrfPreventionFilter implements Filter {
String getHeader(String header);
/**
- * Returns the method. n
+ * Returns the method.
*/
String getMethod();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java
index 48c7e12202b..eda3267bf58 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java
@@ -82,21 +82,21 @@ public class CellModel implements ProtobufMessageHandler, Serializable {
}
/**
- * Constructor nn
+ * Constructor
*/
public CellModel(byte[] column, byte[] value) {
this(column, HConstants.LATEST_TIMESTAMP, value);
}
/**
- * Constructor nnn
+ * Constructor
*/
public CellModel(byte[] column, byte[] qualifier, byte[] value) {
this(column, qualifier, HConstants.LATEST_TIMESTAMP, value);
}
/**
- * Constructor from KeyValue n
+ * Constructor from KeyValue
*/
public CellModel(org.apache.hadoop.hbase.Cell cell) {
this(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(),
@@ -104,7 +104,7 @@ public class CellModel implements ProtobufMessageHandler, Serializable {
}
/**
- * Constructor nnn
+ * Constructor
*/
public CellModel(byte[] column, long timestamp, byte[] value) {
this.column = column;
@@ -113,7 +113,7 @@ public class CellModel implements ProtobufMessageHandler, Serializable {
}
/**
- * Constructor nnnn
+ * Constructor
*/
public CellModel(byte[] column, byte[] qualifier, long timestamp, byte[] value) {
this.column = CellUtil.makeColumn(column, qualifier);
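The constructors above accept either a combined family:qualifier column or a separate family, qualifier and timestamp. A small sketch with placeholder column names.

import org.apache.hadoop.hbase.rest.model.CellModel;
import org.apache.hadoop.hbase.util.Bytes;

public class CellModelSketch {
  public static void main(String[] args) {
    // Two-argument form: the column carries family and qualifier, timestamp defaults to LATEST.
    CellModel latest = new CellModel(Bytes.toBytes("cf:qual"), Bytes.toBytes("v1"));
    // Four-argument form: family and qualifier are combined internally via CellUtil.makeColumn.
    CellModel stamped =
      new CellModel(Bytes.toBytes("cf"), Bytes.toBytes("qual"), 1234567890L, Bytes.toBytes("v2"));
    System.out.println(latest + " / " + stamped);
  }
}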
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java
index c45ca38be9f..64b46f2956c 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java
@@ -64,7 +64,7 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan
/**
* Constructor to use if namespace does not exist in HBASE.
- * @param namespaceName the namespace name. n
+ * @param namespaceName the namespace name.
*/
public NamespacesInstanceModel(String namespaceName) throws IOException {
this(null, namespaceName);
@@ -73,7 +73,7 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan
/**
* Constructor
* @param admin the administrative API
- * @param namespaceName the namespace name. n
+ * @param namespaceName the namespace name.
*/
public NamespacesInstanceModel(Admin admin, String namespaceName) throws IOException {
this.namespaceName = namespaceName;
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java
index c9755532c49..e866c7a935d 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java
@@ -58,7 +58,7 @@ public class NamespacesModel implements Serializable, ProtobufMessageHandler {
/**
* Constructor
- * @param admin the administrative API n
+ * @param admin the administrative API
*/
public NamespacesModel(Admin admin) throws IOException {
NamespaceDescriptor[] nds = admin.listNamespaceDescriptors();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
index e2b20aaa84e..3655a379804 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
@@ -507,7 +507,7 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable {
/**
* @param s the JSON representation of the filter
- * @return the filter n
+ * @return the filter
*/
public static Filter buildFilter(String s) throws Exception {
FilterModel model =
@@ -518,7 +518,7 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable {
/**
* @param filter the filter
- * @return the JSON representation of the filter n
+ * @return the JSON representation of the filter
*/
public static String stringifyFilter(final Filter filter) throws Exception {
return getJasonProvider().locateMapper(FilterModel.class, MediaType.APPLICATION_JSON_TYPE)
@@ -528,7 +528,7 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable {
private static final byte[] COLUMN_DIVIDER = Bytes.toBytes(":");
/**
- * @param scan the scan specification n
+ * @param scan the scan specification
*/
public static ScannerModel fromScan(Scan scan) throws Exception {
ScannerModel model = new ScannerModel();
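buildFilter and stringifyFilter above are inverses over the JSON filter representation, so a round trip is a convenient sanity check. A sketch using a PrefixFilter with an arbitrary prefix.

import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.rest.model.ScannerModel;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterJsonRoundTrip {
  public static void main(String[] args) throws Exception {
    Filter original = new PrefixFilter(Bytes.toBytes("row-1"));
    // Serialize the filter to its JSON representation ...
    String json = ScannerModel.stringifyFilter(original);
    // ... and rebuild an equivalent Filter instance from that JSON.
    Filter rebuilt = ScannerModel.buildFilter(json);
    System.out.println(json + " -> " + rebuilt.getClass().getSimpleName());
  }
}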
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java
index c1023353a70..74d0732ec91 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java
@@ -60,7 +60,7 @@ public class TableInfoModel implements Serializable, ProtobufMessageHandler {
}
/**
- * Constructor n
+ * Constructor
*/
public TableInfoModel(String name) {
this.name = name;
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java
index 51a2bc567cd..32459738002 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java
@@ -48,7 +48,7 @@ public class TableModel implements Serializable {
}
/**
- * Constructor n
+ * Constructor
*/
public TableModel(String name) {
super();
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
index 82f3a9481c8..bccc97deca8 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
@@ -52,7 +52,7 @@ public class RemoteAdmin {
private static volatile Unmarshaller versionClusterUnmarshaller;
/**
- * Constructor nn
+ * Constructor
*/
public RemoteAdmin(Client client, Configuration conf) {
this(client, conf, null);
@@ -69,7 +69,7 @@ public class RemoteAdmin {
}
/**
- * Constructor nnn
+ * Constructor
*/
public RemoteAdmin(Client client, Configuration conf, String accessToken) {
this.client = client;
@@ -89,8 +89,8 @@ public class RemoteAdmin {
}
/**
- * @return string representing the rest api's version n * if the endpoint does not exist, there is
- * a timeout, or some other general failure mode
+   * @return string representing the rest api's version
+   * @throws IOException if the endpoint does not exist, there is a timeout, or some other general
+   *         failure mode
*/
public VersionModel getRestVersion() throws IOException {
@@ -169,8 +169,8 @@ public class RemoteAdmin {
}
/**
- * @return string representing the cluster's version n * if the endpoint does not exist, there is
- * a timeout, or some other general failure mode
+   * @return string representing the cluster's version
+   * @throws IOException if the endpoint does not exist, there is a timeout, or some other general
+   *         failure mode
*/
public StorageClusterVersionModel getClusterVersion() throws IOException {
@@ -336,8 +336,8 @@ public class RemoteAdmin {
}
/**
- * @return string representing the cluster's version n * if the endpoint does not exist, there is
- * a timeout, or some other general failure mode
+   * @return the table list
+   * @throws IOException if the endpoint does not exist, there is a timeout, or some other general
+   *         failure mode
*/
public TableListModel getTableList() throws IOException {
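Each accessor above throws IOException when the endpoint does not exist, times out, or fails in some other way. A minimal sketch of wiring a RemoteAdmin to a placeholder REST endpoint and calling the three accessors.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.RemoteAdmin;

public class RemoteAdminSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Cluster gateway = new Cluster();
    gateway.add("localhost", 8080); // placeholder REST gateway endpoint
    RemoteAdmin admin = new RemoteAdmin(new Client(gateway), conf);
    System.out.println("REST version: " + admin.getRestVersion());
    System.out.println("Cluster version: " + admin.getClusterVersion());
    System.out.println("Tables: " + admin.getTableList());
  }
}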
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
index 021c03ce85b..914ed5740a9 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
@@ -575,7 +575,7 @@ public class TestRemoteTable {
/**
* Tests scanner with limitation limit the number of rows each scanner scan fetch at life time The
- * number of rows returned should be equal to the limit n
+ * number of rows returned should be equal to the limit
*/
@Test
public void testLimitedScan() throws Exception {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java
index 2ef58351439..11e6b07a040 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java
@@ -577,7 +577,7 @@ public abstract class HBaseServerBase> extends
}
/**
- * get NamedQueue Provider to add different logs to ringbuffer n
+ * Get the NamedQueueRecorder used to add different logs to the ring buffer
*/
public NamedQueueRecorder getNamedQueueRecorder() {
return this.namedQueueRecorder;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java
index 44498200991..5eca7afd28b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java
@@ -34,7 +34,7 @@ class HealthReport {
}
/**
- * Gets the status of the region server. n
+ * Gets the status of the region server.
*/
HealthCheckerExitStatus getStatus() {
return status;
@@ -46,7 +46,7 @@ class HealthReport {
}
/**
- * Gets the health report of the region server. n
+ * Gets the health report of the region server.
*/
String getHealthReport() {
return healthReport;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
index 38775bf9384..0839a23f42d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
@@ -68,7 +68,7 @@ public class LocalHBaseCluster {
private final Class extends HRegionServer> regionServerClass;
/**
- * Constructor. nn
+ * Constructor.
*/
public LocalHBaseCluster(final Configuration conf) throws IOException {
this(conf, DEFAULT_NO);
@@ -77,7 +77,7 @@ public class LocalHBaseCluster {
/**
* Constructor.
* @param conf Configuration to use. Post construction has the master's address.
- * @param noRegionServers Count of regionservers to start. n
+ * @param noRegionServers Count of regionservers to start.
*/
public LocalHBaseCluster(final Configuration conf, final int noRegionServers) throws IOException {
this(conf, 1, 0, noRegionServers, getMasterImplementation(conf),
@@ -88,7 +88,7 @@ public class LocalHBaseCluster {
* Constructor.
* @param conf Configuration to use. Post construction has the active master address.
* @param noMasters Count of masters to start.
- * @param noRegionServers Count of regionservers to start. n
+ * @param noRegionServers Count of regionservers to start.
*/
public LocalHBaseCluster(final Configuration conf, final int noMasters, final int noRegionServers)
throws IOException {
@@ -118,7 +118,7 @@ public class LocalHBaseCluster {
* Constructor.
* @param conf Configuration to use. Post construction has the master's address.
* @param noMasters Count of masters to start.
- * @param noRegionServers Count of regionservers to start. nnn
+ * @param noRegionServers Count of regionservers to start.
*/
@SuppressWarnings("unchecked")
public LocalHBaseCluster(final Configuration conf, final int noMasters,
@@ -242,9 +242,7 @@ public class LocalHBaseCluster {
});
}
- /**
- * n * @return region server
- */
+ /** Returns region server */
public HRegionServer getRegionServer(int serverNumber) {
return regionThreads.get(serverNumber).getRegionServer();
}
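The constructors above differ only in how many masters and region servers they start. A minimal sketch that starts one master and two region servers; it assumes a configuration suitable for running everything in-process, and the startup/shutdown/join lifecycle calls are not part of this diff.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.LocalHBaseCluster;

public class LocalClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // (conf, noMasters, noRegionServers) form: one master, two region servers.
    LocalHBaseCluster cluster = new LocalHBaseCluster(conf, 1, 2);
    cluster.startup();
    try {
      System.out.println("First region server: " + cluster.getRegionServer(0).getServerName());
    } finally {
      cluster.shutdown();
      cluster.join();
    }
  }
}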
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java
index 47f6938652d..62da616acb5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java
@@ -30,7 +30,7 @@ public interface RegionStateListener {
// state than introduce a whole new listening mechanism? St.Ack
/**
* Process region split event.
- * @param hri An instance of RegionInfo n
+ * @param hri An instance of RegionInfo
*/
void onRegionSplit(RegionInfo hri) throws IOException;
@@ -42,7 +42,7 @@ public interface RegionStateListener {
void onRegionSplitReverted(RegionInfo hri) throws IOException;
/**
- * Process region merge event. n
+ * Process region merge event.
*/
void onRegionMerged(RegionInfo mergedRegion) throws IOException;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java
index 280ad3b7c47..d9bec2e3d81 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java
@@ -146,8 +146,8 @@ public class SplitLogTask {
/**
* @param data Serialized date to parse.
- * @return An SplitLogTaskState instance made of the passed data n * @see
- * #toByteArray()
+ * @return A SplitLogTask instance made of the passed data
+ * @see #toByteArray()
*/
public static SplitLogTask parseFrom(final byte[] data) throws DeserializationException {
ProtobufUtil.expectPBMagicPrefix(data);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
index 68dc87502e0..8615efe6a7e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
@@ -692,12 +692,12 @@ public class HFileArchiver {
/**
* @return if this is a directory, returns all the children in the directory, otherwise returns
- * an empty list n
+ * an empty list
*/
abstract Collection getChildren() throws IOException;
/**
- * close any outside readers of the file n
+ * close any outside readers of the file
*/
abstract void close() throws IOException;
@@ -708,7 +708,8 @@ public class HFileArchiver {
abstract Path getPath();
/**
- * Move the file to the given destination n * @return true on success n
+ * Move the file to the given destination
+ * @return true on success
*/
public boolean moveAndClose(Path dest) throws IOException {
this.close();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java
index 618a5a66524..2d4bea3a7e3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java
@@ -93,8 +93,7 @@ public final class VersionInfoUtil {
}
/**
- * n * @return the passed-in version int as a version String (e.g. 0x0103004 is
- * 1.3.4)
+ * Returns the passed-in version int as a version String (e.g. 0x0103004 is 1.3.4)
*/
public static String versionNumberToString(final int version) {
return String.format("%d.%d.%d", ((version >> 20) & 0xff), ((version >> 12) & 0xff),
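Decoding the example from the Javadoc above: the major and minor numbers sit in the upper bits and the patch number in the low bits, so 0x0103004 renders as "1.3.4". A tiny sketch.

import org.apache.hadoop.hbase.client.VersionInfoUtil;

public class VersionStringSketch {
  public static void main(String[] args) {
    // (0x0103004 >> 20) & 0xff = 1, (0x0103004 >> 12) & 0xff = 3, low bits = 4
    System.out.println(VersionInfoUtil.versionNumberToString(0x0103004)); // prints 1.3.4
  }
}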
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
index 29aa273b2b3..a7f813aeea0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
@@ -131,7 +131,7 @@ public interface SplitLogManagerCoordination {
void deleteTask(String taskName);
/**
- * Support method to init constants such as timeout. Mostly required for UTs. n
+ * Support method to init constants such as timeout. Mostly required for UTs.
*/
void init() throws IOException;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
index fcf103c82e2..eeba55d2d54 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
@@ -139,7 +139,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener
* It is possible for a task to stay in UNASSIGNED state indefinitely - say SplitLogManager wants
* to resubmit a task. It forces the task to UNASSIGNED state but it dies before it could create
* the RESCAN task node to signal the SplitLogWorkers to pick up the task. To prevent this
- * scenario the SplitLogManager resubmits all orphan and UNASSIGNED tasks at startup. n
+ * scenario the SplitLogManager resubmits all orphan and UNASSIGNED tasks at startup.
*/
private void handleUnassignedTask(String path) {
if (ZKSplitLog.isRescanNode(watcher, path)) {
@@ -551,7 +551,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener
* partially done tasks are present. taskname is the name of the task that was put up in
* zookeeper.
*
- * nn * @return DONE if task completed successfully, ERR otherwise
+ * @return DONE if task completed successfully, ERR otherwise
*/
Status finish(ServerName workerName, String taskname);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
index 7acb0891dbc..6def70f9714 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
@@ -374,7 +374,7 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements SplitLog
* in a cluster.
*
* Synchronization using taskReadySeq ensures that it will try to grab every task
- * that has been put up n
+ * that has been put up
*/
@Override
public void taskLoop() throws InterruptedException {
@@ -534,7 +534,7 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements SplitLog
*/
/**
* endTask() can fail and the only way to recover out of it is for the
- * {@link org.apache.hadoop.hbase.master.SplitLogManager} to timeout the task node. nn
+ * {@link org.apache.hadoop.hbase.master.SplitLogManager} to timeout the task node.
*/
@Override
public void endTask(SplitLogTask slt, LongAdder ctr, SplitTaskDetails details) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
index cc8977f4581..c1ba9e274ad 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
@@ -655,7 +655,7 @@ public abstract class CoprocessorHostHBASE-16663
- * @return true if bypaas coprocessor execution, false if not. n
+ * @return true if bypass coprocessor execution, false if not.
*/
protected boolean execShutdown(final ObserverOperation observerOperation)
throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index ad381dd4ef3..175ff25e761 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -493,7 +493,7 @@ public interface MasterObserver {
/**
* Called prior to unassigning a given region.
- * @param ctx the environment to interact with the framework and master n
+ * @param ctx the environment to interact with the framework and master
*/
default void preUnassign(final ObserverContext ctx,
final RegionInfo regionInfo) throws IOException {
@@ -501,7 +501,7 @@ public interface MasterObserver {
/**
* Called after the region unassignment has been requested.
- * @param ctx the environment to interact with the framework and master n
+ * @param ctx the environment to interact with the framework and master
*/
default void postUnassign(final ObserverContext ctx,
final RegionInfo regionInfo) throws IOException {
@@ -509,7 +509,7 @@ public interface MasterObserver {
/**
* Called prior to marking a given region as offline.
- * @param ctx the environment to interact with the framework and master n
+ * @param ctx the environment to interact with the framework and master
*/
default void preRegionOffline(final ObserverContext ctx,
final RegionInfo regionInfo) throws IOException {
@@ -517,7 +517,7 @@ public interface MasterObserver {
/**
* Called after the region has been marked offline.
- * @param ctx the environment to interact with the framework and master n
+ * @param ctx the environment to interact with the framework and master
*/
default void postRegionOffline(final ObserverContext ctx,
final RegionInfo regionInfo) throws IOException {
@@ -597,7 +597,7 @@ public interface MasterObserver {
/**
* This will be called before update META step as part of split transaction.
- * @param ctx the environment to interact with the framework and master nn
+ * @param ctx the environment to interact with the framework and master
*/
default void preSplitRegionBeforeMETAAction(
final ObserverContext ctx, final byte[] splitKey,
@@ -1465,67 +1465,72 @@ public interface MasterObserver {
}
/**
- * Called before remove a replication peer n * @param peerId a short name that identifies the peer
+ * Called before removing a replication peer
+ * @param peerId a short name that identifies the peer
*/
default void preRemoveReplicationPeer(final ObserverContext ctx,
String peerId) throws IOException {
}
/**
- * Called after remove a replication peer n * @param peerId a short name that identifies the peer
+ * Called after removing a replication peer
+ * @param peerId a short name that identifies the peer
*/
default void postRemoveReplicationPeer(final ObserverContext ctx,
String peerId) throws IOException {
}
/**
- * Called before enable a replication peer n * @param peerId a short name that identifies the peer
+ * Called before enabling a replication peer
+ * @param peerId a short name that identifies the peer
*/
default void preEnableReplicationPeer(final ObserverContext ctx,
String peerId) throws IOException {
}
/**
- * Called after enable a replication peer n * @param peerId a short name that identifies the peer
+ * Called after enabling a replication peer
+ * @param peerId a short name that identifies the peer
*/
default void postEnableReplicationPeer(final ObserverContext ctx,
String peerId) throws IOException {
}
/**
- * Called before disable a replication peer n * @param peerId a short name that identifies the
- * peer
+ * Called before disabling a replication peer
+ * @param peerId a short name that identifies the peer
*/
default void preDisableReplicationPeer(final ObserverContext ctx,
String peerId) throws IOException {
}
/**
- * Called after disable a replication peer n * @param peerId a short name that identifies the peer
+ * Called after disabling a replication peer
+ * @param peerId a short name that identifies the peer
*/
default void postDisableReplicationPeer(final ObserverContext ctx,
String peerId) throws IOException {
}
/**
- * Called before get the configured ReplicationPeerConfig for the specified peer n * @param peerId
- * a short name that identifies the peer
+ * Called before getting the configured ReplicationPeerConfig for the specified peer
+ * @param peerId a short name that identifies the peer
*/
default void preGetReplicationPeerConfig(final ObserverContext ctx,
String peerId) throws IOException {
}
/**
- * Called after get the configured ReplicationPeerConfig for the specified peer n * @param peerId
- * a short name that identifies the peer
+ * Called after getting the configured ReplicationPeerConfig for the specified peer
+ * @param peerId a short name that identifies the peer
*/
default void postGetReplicationPeerConfig(final ObserverContext ctx,
String peerId) throws IOException {
}
/**
- * Called before update peerConfig for the specified peer n * @param peerId a short name that
- * identifies the peer
+ * Called before updating peerConfig for the specified peer
+ * @param peerId a short name that identifies the peer
*/
default void preUpdateReplicationPeerConfig(
final ObserverContext ctx, String peerId,
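The replication-peer hooks above are default methods on MasterObserver; the ObserverContext generics are elided in this diff. A minimal sketch of a master coprocessor that overrides two of them; the class name and log output are illustrative only.

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

public class PeerAuditObserver implements MasterCoprocessor, MasterObserver {

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    // Expose this class as the observer implementation.
    return Optional.of(this);
  }

  @Override
  public void preRemoveReplicationPeer(ObserverContext<MasterCoprocessorEnvironment> ctx,
    String peerId) throws IOException {
    System.out.println("about to remove replication peer " + peerId);
  }

  @Override
  public void postDisableReplicationPeer(ObserverContext<MasterCoprocessorEnvironment> ctx,
    String peerId) throws IOException {
    System.out.println("replication peer " + peerId + " was disabled");
  }
}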
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
index 057a9c56814..d37013534b1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
@@ -570,15 +570,15 @@ public interface RegionObserver {
/**
* This will be called for region operations where read lock is acquired in
- * {@link Region#startRegionOperation()}. n * @param operation The operation is about to be taken
- * on the region
+ * {@link Region#startRegionOperation()}.
+ * @param operation the operation about to be taken on the region
*/
default void postStartRegionOperation(ObserverContext ctx,
Operation operation) throws IOException {
}
/**
- * Called after releasing read lock in {@link Region#closeRegionOperation()}. nn
+ * Called after releasing read lock in {@link Region#closeRegionOperation()}.
*/
default void postCloseRegionOperation(ObserverContext ctx,
Operation operation) throws IOException {
@@ -589,8 +589,8 @@ public interface RegionObserver {
* batch operation fails.
*
* Note: Do not retain references to any Cells in Mutations beyond the life of this invocation. If
- * need a Cell reference for later use, copy the cell and use that. nn * @param success true if
- * batch operation is successful otherwise false.
+ * need a Cell reference for later use, copy the cell and use that.
+ * @param success true if batch operation is successful, otherwise false.
*/
default void postBatchMutateIndispensably(ObserverContext ctx,
MiniBatchOperationInProgress miniBatchOp, boolean success) throws IOException {
@@ -1463,8 +1463,8 @@ public interface RegionObserver {
* @param fs fileystem to read from
* @param p path to the file
* @param in {@link FSDataInputStreamWrapper}
- * @param size Full size of the file n * @param r original reference file. This will be not null
- * only when reading a split file.
+ * @param size Full size of the file
+ * @param r original reference file. This will be non-null only when reading a split file.
* @param reader the base reader, if not {@code null}, from previous RegionObserver in the chain
* @return a Reader instance to use instead of the base reader if overriding default behavior,
* null otherwise
@@ -1485,8 +1485,8 @@ public interface RegionObserver {
* @param fs fileystem to read from
* @param p path to the file
* @param in {@link FSDataInputStreamWrapper}
- * @param size Full size of the file n * @param r original reference file. This will be not null
- * only when reading a split file.
+ * @param size Full size of the file
+ * @param r original reference file. This will be non-null only when reading a split file.
* @param reader the base reader instance
* @return The reader to use
* @deprecated For Phoenix only, StoreFileReader is not a stable interface.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java
index 19fa8adc1e3..8c02b346f3c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java
@@ -49,7 +49,7 @@ public class ForeignException extends IOException {
/**
* Create a new ForeignException that can be serialized. It is assumed that this came form a local
- * source. nn
+ * source.
*/
public ForeignException(String source, Throwable cause) {
super(cause);
@@ -60,7 +60,7 @@ public class ForeignException extends IOException {
/**
* Create a new ForeignException that can be serialized. It is assumed that this is locally
- * generated. nn
+ * generated.
*/
public ForeignException(String source, String msg) {
super(new IllegalArgumentException(msg));
@@ -146,8 +146,8 @@ public class ForeignException extends IOException {
}
/**
- * Takes a series of bytes and tries to generate an ForeignException instance for it. n * @return
- * the ForeignExcpetion instance
+ * Takes a series of bytes and tries to generate a ForeignException instance for it.
+ * @return the ForeignException instance
* @throws InvalidProtocolBufferException if there was deserialization problem this is thrown.
*/
public static ForeignException deserialize(byte[] bytes) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java
index 3718900cc87..09fb78468dc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java
@@ -40,7 +40,7 @@ public interface ForeignExceptionSnare {
/**
* Rethrow an exception currently held by the {@link ForeignExceptionSnare}. If there is no
- * exception this is a no-op n * all exceptions from remote sources are procedure exceptions
+ * exception this is a no-op; all exceptions from remote sources are procedure exceptions
*/
void rethrowException() throws ForeignException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java
index 94418f0c381..ece244fda4f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java
@@ -106,7 +106,7 @@ public abstract class EventHandler implements Runnable, Comparable
}
/**
- * This method is the main processing loop to be implemented by the various subclasses. n
+ * This method is the main processing loop to be implemented by the various subclasses.
*/
public abstract void process() throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java
index c74388e5b92..6f5acca3f21 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java
@@ -257,7 +257,7 @@ public class ExecutorService {
}
/**
- * Submit the event to the queue for handling. n
+ * Submit the event to the queue for handling.
*/
void submit(final EventHandler event) {
// If there is a listener for this type, make sure we call the before
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java
index ed3986f5883..337fde60cf7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java
@@ -67,23 +67,19 @@ public class Reference {
bottom
}
- /**
- * n * @return A {@link Reference} that points at top half of a an hfile
- */
+ /** Returns a {@link Reference} that points at the top half of an hfile */
public static Reference createTopReference(final byte[] splitRow) {
return new Reference(splitRow, Range.top);
}
- /**
- * n * @return A {@link Reference} that points at the bottom half of a an hfile
- */
+ /** Returns a {@link Reference} that points at the bottom half of an hfile */
public static Reference createBottomReference(final byte[] splitRow) {
return new Reference(splitRow, Range.bottom);
}
/**
* Constructor
- * @param splitRow This is row we are splitting around. n
+ * @param splitRow This is the row we are splitting around.
*/
Reference(final byte[] splitRow, final Range fr) {
this.splitkey = splitRow == null ? null : KeyValueUtil.createFirstOnRow(splitRow).getKey();
@@ -102,15 +98,13 @@ public class Reference {
}
/**
- * n
- */
+ * Returns the file region (top or bottom half). */
public Range getFileRegion() {
return this.region;
}
/**
- * n
- */
+ * Returns the split key. */
public byte[] getSplitKey() {
return splitkey;
}
@@ -151,7 +145,8 @@ public class Reference {
}
/**
- * Read a Reference from FileSystem. nn * @return New Reference made from passed p n
+ * Read a Reference from FileSystem.
+ * @return New Reference made from passed p
*/
public static Reference read(final FileSystem fs, final Path p) throws IOException {
InputStream in = fs.open(p);
@@ -198,7 +193,7 @@ public class Reference {
/**
* Use this when writing to a stream and you want to use the pb mergeDelimitedFrom (w/o the
* delimiter, pb reads to EOF which may not be what you want).
- * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. n
+ * @return This instance serialized as a delimited protobuf w/ a magic pb prefix.
*/
byte[] toByteArray() throws IOException {
return ProtobufUtil.prependPBMagic(convert().toByteArray());
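createTopReference and createBottomReference above build the two halves of a split around a given row, and getSplitKey returns the stored first-on-row key. A small sketch with an arbitrary split row; Reference is an internal class, so this is for illustration only.

import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.util.Bytes;

public class ReferenceSketch {
  public static void main(String[] args) {
    byte[] splitRow = Bytes.toBytes("row-5000"); // illustrative split point
    Reference top = Reference.createTopReference(splitRow);
    Reference bottom = Reference.createBottomReference(splitRow);
    System.out.println("top split key:    " + Bytes.toStringBinary(top.getSplitKey()));
    System.out.println("bottom split key: " + Bytes.toStringBinary(bottom.getSplitKey()));
  }
}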
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
index 8419ccb6c1c..4e795ec75e7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
@@ -95,7 +95,7 @@ public interface BlockCache extends Iterable {
int evictBlocksByHfileName(String hfileName);
/**
- * Get the statistics for this block cache. n
+ * Get the statistics for this block cache.
*/
CacheStats getStats();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
index daa49d26a23..e6a4b609bc7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
@@ -74,9 +74,7 @@ public class BlockCacheUtil {
}
}).setPrettyPrinting().create();
- /**
- * n * @return The block content as String.
- */
+ /** Returns The block content as String. */
public static String toString(final CachedBlock cb, final long now) {
return "filename=" + cb.getFilename() + ", " + toStringMinusFileName(cb, now);
}
@@ -142,9 +140,7 @@ public class BlockCacheUtil {
return GSON.toJson(bc);
}
- /**
- * n * @return The block content of bc as a String minus the filename.
- */
+ /** Returns The block content of bc as a String minus the filename. */
public static String toStringMinusFileName(final CachedBlock cb, final long now) {
return "offset=" + cb.getOffset() + ", size=" + cb.getSize() + ", age="
+ (now - cb.getCachedTime()) + ", type=" + cb.getBlockType() + ", priority="
@@ -281,9 +277,7 @@ public class BlockCacheUtil {
new ConcurrentSkipListMap<>();
FastLongHistogram hist = new FastLongHistogram();
- /**
- * n * @return True if full.... if we won't be adding any more.
- */
+ /** Returns true if full, i.e. if we won't be adding any more. */
public boolean update(final CachedBlock cb) {
if (isFull()) return true;
NavigableSet set = this.cachedBlockByFile.get(cb.getFilename());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCompressedSizePredicator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCompressedSizePredicator.java
index a90e04fe5ad..1b2fdc64197 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCompressedSizePredicator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCompressedSizePredicator.java
@@ -52,7 +52,7 @@ public interface BlockCompressedSizePredicator {
/**
* Decides if the block should be finished based on the comparison of its uncompressed size
* against an adjusted size based on a predicated compression factor.
- * @param uncompressed true if the block should be finished. n
+ * @param uncompressed the block's current uncompressed size
+ * @return true if the block should be finished
*/
boolean shouldFinishBlock(int uncompressed);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
index 27f75e4eee6..ff796b2d4f7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
@@ -202,7 +202,7 @@ public class CacheConfig {
}
/**
- * Constructs a cache configuration copied from the specified configuration. n
+ * Constructs a cache configuration copied from the specified configuration.
*/
public CacheConfig(CacheConfig cacheConf) {
this.cacheDataOnRead = cacheConf.cacheDataOnRead;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java
index 2fe50381b77..4e5dfe34df6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java
@@ -31,7 +31,7 @@ public interface CacheableDeserializer {
/**
* @param b ByteBuff to deserialize the Cacheable.
* @param allocator to manage NIO ByteBuffers for future allocation or de-allocation.
- * @return T the deserialized object. n
+ * @return T the deserialized object.
*/
T deserialize(ByteBuff b, ByteBuffAllocator allocator) throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java
index 2241a158efb..bb253e050fe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java
@@ -83,9 +83,9 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase
private BloomType bloomType;
/**
- * n * each chunk's size in bytes. The real chunk size might be different as required by the fold
- * factor. n * target false positive rate n * hash function type to use n * maximum degree of
- * folding allowed n * the bloom type
+ * @param chunkByteSizeHint each chunk's size in bytes. The real chunk size might be different as
+ *        required by the fold factor.
+ * @param errorRate target false positive rate
+ * @param hashType hash function type to use
+ * @param maxFold maximum degree of folding allowed
+ * @param bloomType the bloom type
*/
public CompoundBloomFilterWriter(int chunkByteSizeHint, float errorRate, int hashType,
int maxFold, boolean cacheOnWrite, CellComparator comparator, BloomType bloomType) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
index 6e72890be12..b5a5095c336 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
@@ -138,7 +138,7 @@ public class HFileBlockIndex {
}
/**
- * n * from 0 to {@link #getRootBlockCount() - 1}
+   * @param i from 0 to {@link #getRootBlockCount()} - 1
*/
public byte[] getRootBlockKey(int i) {
return blockKeys[i];
@@ -256,7 +256,7 @@ public class HFileBlockIndex {
}
/**
- * n * from 0 to {@link #getRootBlockCount() - 1}
+   * @param i from 0 to {@link #getRootBlockCount()} - 1
*/
public Cell getRootBlockKey(int i) {
return blockKeys[i];
@@ -521,7 +521,7 @@ public class HFileBlockIndex {
}
/**
- * n * from 0 to {@link #getRootBlockCount() - 1}
+   * @param i from 0 to {@link #getRootBlockCount()} - 1
*/
public Cell getRootBlockKey(int i) {
return seeker.getRootBlockKey(i);
@@ -600,12 +600,12 @@ public class HFileBlockIndex {
/**
* Return the data block which contains this key. This function will only be called when the
* HFile version is larger than 1.
- * @param key the key we are looking for
- * @param currentBlock the current block, to avoid re-reading the same block nnn * @param
- * expectedDataBlockEncoding the data block encoding the caller is expecting
- * the data block to be in, or null to not perform this check and return the
- * block irrespective of the encoding
- * @return reader a basic way to load blocks n
+ * @param key the key we are looking for
+ * @param currentBlock the current block, to avoid re-reading the same block
+ * @param expectedDataBlockEncoding the data block encoding the caller is expecting the data
+ * block to be in, or null to not perform this check and return
+ * the block irrespective of the encoding
+ * @return reader a basic way to load blocks
*/
public HFileBlock seekToDataBlock(final Cell key, HFileBlock currentBlock, boolean cacheBlocks,
boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding,
@@ -629,7 +629,7 @@ public class HFileBlockIndex {
* block to be in, or null to not perform this check and return
* the block irrespective of the encoding.
* @return the BlockWithScanInfo which contains the DataBlock with other scan info such as
- * nextIndexedKey. n
+ * nextIndexedKey.
*/
public abstract BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock,
boolean cacheBlocks, boolean pread, boolean isCompaction,
@@ -665,8 +665,8 @@ public class HFileBlockIndex {
}
/**
- * Finds the root-level index block containing the given key. n * Key to find n * the comparator
- * to be used
+   * Finds the root-level index block containing the given key.
+   * @param key  Key to find
+   * @param comp the comparator to be used
* @return Offset of block containing key (between 0 and the number of blocks - 1)
* or -1 if this file does not contain the request.
*/
@@ -677,7 +677,7 @@ public class HFileBlockIndex {
CellComparator comp);
/**
- * Finds the root-level index block containing the given key. n * Key to find
+   * Finds the root-level index block containing the given key.
+   * @param key Key to find
* @return Offset of block containing key (between 0 and the number of blocks - 1)
* or -1 if this file does not contain the request.
*/
@@ -690,13 +690,13 @@ public class HFileBlockIndex {
}
/**
- * Finds the root-level index block containing the given key. n * Key to find
+   * Finds the root-level index block containing the given key.
+   * @param key Key to find
*/
public abstract int rootBlockContainingKey(final Cell key);
/**
- * The indexed key at the ith position in the nonRootIndex. The position starts at 0. n * @param
- * i the ith position
+ * The indexed key at the ith position in the nonRootIndex. The position starts at 0.
+ * @param i the ith position
* @return The indexed key at the ith position in the nonRootIndex.
*/
protected byte[] getNonRootIndexedKey(ByteBuff nonRootIndex, int i) {
@@ -728,11 +728,11 @@ public class HFileBlockIndex {
/**
* Performs a binary search over a non-root level index block. Utilizes the secondary index,
- * which records the offsets of (offset, onDiskSize, firstKey) tuples of all entries. n * the
- * key we are searching for offsets to individual entries in the blockIndex buffer n * the
- * non-root index block buffer, starting with the secondary index. The position is ignored.
+   * which records the offsets of (offset, onDiskSize, firstKey) tuples of all entries.
+   * @param key          the key we are searching for offsets to individual entries in the
+   *                     blockIndex buffer
+   * @param nonRootIndex the non-root index block buffer, starting with the secondary index. The
+   *                     position is ignored.
* @return the index i in [0, numEntries - 1] such that keys[i] <= key < keys[i + 1], if keys is
- * the array of all keys being searched, or -1 otherwise n
+ * the array of all keys being searched, or -1 otherwise
*/
static int binarySearchNonRootIndex(Cell key, ByteBuff nonRootIndex,
CellComparator comparator) {
@@ -809,8 +809,8 @@ public class HFileBlockIndex {
/**
* Search for one key using the secondary index in a non-root block. In case of success,
* positions the provided buffer at the entry of interest, where the file offset and the
- * on-disk-size can be read. n * a non-root block without header. Initial position does not
- * matter. n * the byte array containing the key
+   * on-disk-size can be read. The block passed in is a non-root block without header; its initial
+   * position does not matter. The key is the byte array containing the key to search for.
* @return the index position where the given key was found, otherwise return -1 in the case the
* given key is before the first key.
*/
@@ -838,7 +838,7 @@ public class HFileBlockIndex {
* the root level by {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the offset
* that function returned.
* @param in the buffered input stream or wrapped byte input stream
- * @param numEntries the number of root-level index entries n
+ * @param numEntries the number of root-level index entries
*/
public void readRootIndex(DataInput in, final int numEntries) throws IOException {
blockOffsets = new long[numEntries];
@@ -866,7 +866,7 @@ public class HFileBlockIndex {
* that function returned.
* @param blk the HFile block
* @param numEntries the number of root-level index entries
- * @return the buffered input stream or wrapped byte input stream n
+ * @return the buffered input stream or wrapped byte input stream
*/
public DataInputStream readRootIndex(HFileBlock blk, final int numEntries) throws IOException {
DataInputStream in = blk.getByteStream();
@@ -879,7 +879,7 @@ public class HFileBlockIndex {
* {@link #readRootIndex(DataInput, int)}, but also reads metadata necessary to compute the
* mid-key in a multi-level index.
* @param blk the HFile block
- * @param numEntries the number of root-level index entries n
+ * @param numEntries the number of root-level index entries
*/
public void readMultiLevelIndexRoot(HFileBlock blk, final int numEntries) throws IOException {
DataInputStream in = readRootIndex(blk, numEntries);
@@ -1040,7 +1040,7 @@ public class HFileBlockIndex {
* there is no inline block index anymore, so we only write that level of block index to disk as
* the root level.
* @param out FSDataOutputStream
- * @return position at which we entered the root-level index. n
+ * @return position at which we entered the root-level index.
*/
public long writeIndexBlocks(FSDataOutputStream out) throws IOException {
if (curInlineChunk != null && curInlineChunk.getNumEntries() != 0) {
@@ -1100,7 +1100,7 @@ public class HFileBlockIndex {
* Writes the block index data as a single level only. Does not do any block framing.
* @param out the buffered output stream to write the index to. Typically a stream
* writing into an {@link HFile} block.
- * @param description a short description of the index being written. Used in a log message. n
+ * @param description a short description of the index being written. Used in a log message.
*/
public void writeSingleLevelIndex(DataOutput out, String description) throws IOException {
expectNumLevels(1);
@@ -1123,10 +1123,11 @@ public class HFileBlockIndex {
/**
* Split the current level of the block index into intermediate index blocks of permitted size
* and write those blocks to disk. Return the next level of the block index referencing those
- * intermediate-level blocks. n * @param currentLevel the current level of the block index, such
- * as the a chunk referencing all leaf-level index blocks
+ * intermediate-level blocks.
+   * @param currentLevel the current level of the block index, such as a chunk referencing all
+ * leaf-level index blocks
* @return the parent level block index, which becomes the root index after a few (usually zero)
- * iterations n
+ * iterations
*/
private BlockIndexChunk writeIntermediateLevel(FSDataOutputStream out,
BlockIndexChunk currentLevel) throws IOException {
@@ -1245,7 +1246,7 @@ public class HFileBlockIndex {
/**
* Write out the current inline index block. Inline blocks are non-root blocks, so the non-root
- * index format is used. n
+ * index format is used.
*/
@Override
public void writeInlineBlock(DataOutput out) throws IOException {
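
The binarySearchNonRootIndex contract above (return the index i such that keys[i] <= key < keys[i + 1], or -1 if the key precedes the first entry) is easy to model outside of HBase. The sketch below is illustrative only: it uses plain arrays and a Comparator in place of ByteBuff, Cell, and CellComparator.

import java.util.Comparator;

final class NonRootIndexSearchSketch {
    /**
     * Returns the index i in [0, keys.length - 1] such that keys[i] <= key < keys[i + 1],
     * or -1 if key is before the first key (mirrors the contract documented above).
     */
    static <K> int binarySearch(K[] keys, K key, Comparator<K> cmp) {
        int low = 0, high = keys.length - 1, result = -1;
        while (low <= high) {
            int mid = (low + high) >>> 1;
            if (cmp.compare(keys[mid], key) <= 0) {
                result = mid;      // keys[mid] <= key: remember it and look further right
                low = mid + 1;
            } else {
                high = mid - 1;    // keys[mid] > key: look left
            }
        }
        return result;
    }

    public static void main(String[] args) {
        String[] firstKeys = { "b", "f", "k" };   // first key of each indexed block
        System.out.println(binarySearch(firstKeys, "g", Comparator.naturalOrder())); // 1
        System.out.println(binarySearch(firstKeys, "a", Comparator.naturalOrder())); // -1
    }
}
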
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java
index cb2d5bbcfb6..1629536c148 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java
@@ -39,20 +39,20 @@ public interface HFileDataBlockEncoder {
/**
* Starts encoding for a block of KeyValues. Call
* {@link #endBlockEncoding(HFileBlockEncodingContext, DataOutputStream, byte[], BlockType)} to
- * finish encoding of a block. nnn
+ * finish encoding of a block.
*/
void startBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out)
throws IOException;
/**
- * Encodes a KeyValue. nnnn
+ * Encodes a KeyValue.
*/
void encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out)
throws IOException;
/**
* Ends encoding for a block of KeyValues. Gives a chance for the encoder to do the finishing
- * stuff for the encoded block. It must be called at the end of block encoding. nnnnn
+ * stuff for the encoded block. It must be called at the end of block encoding.
*/
void endBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out,
byte[] uncompressedBytesWithHeader, BlockType blockType) throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index 1caca6abf4e..df58c94464a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -516,8 +516,8 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
* Within a loaded block, seek looking for the last key that is smaller than (or equal to?) the
* key we are interested in. A note on the seekBefore: if you have seekBefore = true, AND the
* first key in the block = key, then you'll get thrown exceptions. The caller has to check for
- * that case and load the previous block as appropriate. n * the key to find n * find the key
- * before the given key in case of exact match.
+   * that case and load the previous block as appropriate. The key argument is the key to find; the
+   * seekBefore flag asks for the key before the given key in case of an exact match.
* @return 0 in case of an exact key match, 1 in case of an inexact match, -2 in case of an
* inexact match and furthermore, the input key less than the first key of current
* block(e.g. using a faked index key)
@@ -1641,10 +1641,10 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
/**
* Create a Scanner on this file. No seeks or reads are done on creation. Call
* {@link HFileScanner#seekTo(Cell)} to position an start the read. There is nothing to clean up
- * in a Scanner. Letting go of your references to the scanner is sufficient. n * Store
- * configuration. n * True if we should cache blocks read in by this scanner. n * Use positional
- * read rather than seek+read if true (pread is better for random reads, seek+read is better
- * scanning). n * is scanner being used for a compaction?
+   * in a Scanner. Letting go of your references to the scanner is sufficient. The parameters are,
+   * in order: the store configuration; whether blocks read in by this scanner should be cached;
+   * whether to use positional read rather than seek+read (pread is better for random reads,
+   * seek+read is better for scanning); and whether the scanner is being used for a compaction.
* @return Scanner on this file.
*/
@Override
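
The seek return codes documented above (0 for an exact match, 1 for an inexact match, -2 when the sought key precedes the first key of the loaded block) can be modeled with a plain binary search. This is a sketch of the contract only, not the HBase implementation:

import java.util.Arrays;

final class BlockSeekSketch {
    // Models the return-code contract for a seek inside a loaded block:
    // 0 = exact match, 1 = positioned at the last key smaller than the sought key,
    // -2 = sought key is smaller than the first key of the block.
    static int seekInBlock(String[] blockKeys, String key) {
        int pos = Arrays.binarySearch(blockKeys, key);
        if (pos >= 0) {
            return 0;                      // exact match
        }
        int insertion = -pos - 1;
        return insertion == 0 ? -2 : 1;    // before first key, or inexact match
    }

    public static void main(String[] args) {
        String[] block = { "c", "g", "m" };
        System.out.println(seekInBlock(block, "g"));  // 0
        System.out.println(seekInBlock(block, "h"));  // 1
        System.out.println(seekInBlock(block, "a"));  // -2
    }
}
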
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
index e77b133523f..fd5c66b126b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
@@ -39,12 +39,13 @@ public interface HFileScanner extends Shipper, Closeable {
/**
* SeekTo or just before the passed cell . Examine the return code to figure whether
* we found the cell or not. Consider the cell stream of all the cells in the file,
- * c[0] .. c[n] , where there are n cells in the file. n * @return -1, if cell <
- * c[0], no position; 0, such that c[i] = cell and scanner is left in position i; and 1, such that
- * c[i] < cell, and scanner is left in position i. The scanner will position itself between
- * c[i] and c[i+1] where c[i] < cell <= c[i+1]. If there is no cell c[i+1] greater than or
- * equal to the input cell, then the scanner will position itself at the end of the file and
- * next() will return false when it is called. n
+ * c[0] .. c[n] , where there are n cells in the file.
+ * @return -1, if cell < c[0], no position; 0, such that c[i] = cell and scanner is left in
+ * position i; and 1, such that c[i] < cell, and scanner is left in position i. The
+ * scanner will position itself between c[i] and c[i+1] where c[i] < cell <= c[i+1].
+ * If there is no cell c[i+1] greater than or equal to the input cell, then the scanner
+ * will position itself at the end of the file and next() will return false when it is
+ * called.
*/
int seekTo(Cell cell) throws IOException;
@@ -59,7 +60,7 @@ public interface HFileScanner extends Shipper, Closeable {
* false when it is called.
* @param cell Cell to find (should be non-null)
* @return -1, if cell < c[0], no position; 0, such that c[i] = cell and scanner is left in
- * position i; and 1, such that c[i] < cell, and scanner is left in position i. n
+ * position i; and 1, such that c[i] < cell, and scanner is left in position i.
*/
int reseekTo(Cell cell) throws IOException;
@@ -69,20 +70,20 @@ public interface HFileScanner extends Shipper, Closeable {
* @param cell Cell to find
* @return false if cell <= c[0] or true with scanner in position 'i' such that: c[i] <
* cell. Furthermore: there may be a c[i+1], such that c[i] < cell <= c[i+1] but
- * there may also NOT be a c[i+1], and next() will return false (EOF). n
+ * there may also NOT be a c[i+1], and next() will return false (EOF).
*/
boolean seekBefore(Cell cell) throws IOException;
/**
* Positions this scanner at the start of the file.
* @return False if empty file; i.e. a call to next would return false and the current key and
- * value are undefined. n
+ * value are undefined.
*/
boolean seekTo() throws IOException;
/**
* Scans to the next entry in the file.
- * @return Returns false if you are at the end otherwise true if more in file. n
+ * @return Returns false if you are at the end otherwise true if more in file.
*/
boolean next() throws IOException;
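
A hedged usage sketch of the scanner contract documented above: seekTo() positions at the start of the file and returns false for an empty file, and next() returns false once the last entry has been passed. Only the two no-argument methods shown in this interface are used; reading the current cell is left out, and the snippet assumes the HBase classes are on the classpath.

// Counts the entries in an HFile using the documented seekTo()/next() contract.
static long countEntries(org.apache.hadoop.hbase.io.hfile.HFileScanner scanner)
    throws java.io.IOException {
  long count = 0;
  if (scanner.seekTo()) {          // false means an empty file, nothing to read
    do {
      count++;                     // a real caller would read the current cell here
    } while (scanner.next());      // false once we are past the last entry
  }
  return count;
}
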
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
index b33d471ae49..eda5cde46a1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
@@ -574,8 +574,8 @@ public class HFileWriterImpl implements HFile.Writer {
* Add a meta block to the end of the file. Call before close(). Metadata blocks are expensive.
* Fill one with a bunch of serialized data rather than do a metadata block per metadata instance.
* If metadata is small, consider adding to file info using
- * {@link #appendFileInfo(byte[], byte[])} n * name of the block n * will call readFields to get
- * data later (DO NOT REUSE)
+   * {@link #appendFileInfo(byte[], byte[])}
+   * @param metaBlockName name of the block
+   * @param content       will call readFields to get data later (DO NOT REUSE)
*/
@Override
public void appendMetaBlock(String metaBlockName, Writable content) {
@@ -723,7 +723,7 @@ public class HFileWriterImpl implements HFile.Writer {
/**
* Add key/value to file. Keys must be added in an order that agrees with the Comparator passed on
- * construction. n * Cell to add. Cannot be empty nor null.
+   * construction.
+   * @param cell Cell to add. Cannot be empty nor null.
*/
@Override
public void append(final Cell cell) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java
index 6b0c913ca1b..58229639309 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java
@@ -30,16 +30,15 @@ import org.apache.yetus.audience.InterfaceAudience;
public interface InlineBlockWriter {
/**
- * Determines whether there is a new block to be written out. n * whether the file is being
- * closed, in which case we need to write out all available data and not wait to accumulate
- * another block
+   * Determines whether there is a new block to be written out.
+   * @param closing whether the file is being closed, in which case we need to write out all
+   *                available data and not wait to accumulate another block
*/
boolean shouldWriteBlock(boolean closing);
/**
* Writes the block to the provided stream. Must not write any magic records. Called only if
- * {@link #shouldWriteBlock(boolean)} returned true. n * a stream (usually a compressing stream)
- * to write the block to
+   * {@link #shouldWriteBlock(boolean)} returned true.
+   * @param out a stream (usually a compressing stream) to write the block to
*/
void writeInlineBlock(DataOutput out) throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java
index 18d21deceb2..9e480247ee9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpIndexBlockEncoder.java
@@ -66,7 +66,7 @@ public class NoOpIndexBlockEncoder implements HFileIndexBlockEncoder {
/**
* Writes the block index chunk in the non-root index block format. This format contains the
* number of entries, an index of integer offsets for quick binary search on variable-length
- * records, and tuples of block offset, on-disk block size, and the first key for each entry. nn
+ * records, and tuples of block offset, on-disk block size, and the first key for each entry.
*/
private void writeNonRoot(BlockIndexChunk blockIndexChunk, DataOutput out) throws IOException {
// The number of entries in the block.
@@ -103,7 +103,7 @@ public class NoOpIndexBlockEncoder implements HFileIndexBlockEncoder {
* similar to the {@link HFile} version 1 block index format, except that we store on-disk size of
* the block instead of its uncompressed size.
* @param out the data output stream to write the block index to. Typically a stream writing into
- * an {@link HFile} block. n
+ * an {@link HFile} block.
*/
private void writeRoot(BlockIndexChunk blockIndexChunk, DataOutput out) throws IOException {
for (int i = 0; i < blockIndexChunk.getNumEntries(); ++i) {
@@ -443,8 +443,8 @@ public class NoOpIndexBlockEncoder implements HFileIndexBlockEncoder {
}
/**
- * The indexed key at the ith position in the nonRootIndex. The position starts at 0. n * @param
- * i the ith position
+ * The indexed key at the ith position in the nonRootIndex. The position starts at 0.
+ * @param i the ith position
* @return The indexed key at the ith position in the nonRootIndex.
*/
protected byte[] getNonRootIndexedKey(ByteBuff nonRootIndex, int i) {
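
The non-root block layout described above (an entry count, a secondary index of offsets enabling binary search over variable-length records, then the (block offset, on-disk size, first key) tuples) can be sketched with a plain DataOutputStream. Field widths and helper names below are illustrative, not the exact on-disk HFile encoding:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class NonRootIndexWriteSketch {
    // Writes: entry count, secondary index of relative entry offsets, then the entries.
    static byte[] write(long[] blockOffsets, int[] onDiskSizes, byte[][] firstKeys)
            throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bos);
        int n = firstKeys.length;
        out.writeInt(n);                          // number of entries
        int cur = 0;
        for (int i = 0; i < n; i++) {             // secondary index: offset of each entry
            out.writeInt(cur);
            cur += 8 + 4 + firstKeys[i].length;   // long offset + int size + key bytes
        }
        out.writeInt(cur);                        // total size of the entry section
        for (int i = 0; i < n; i++) {             // the (offset, onDiskSize, firstKey) tuples
            out.writeLong(blockOffsets[i]);
            out.writeInt(onDiskSizes[i]);
            out.write(firstKeys[i]);
        }
        out.flush();
        return bos.toByteArray();
    }
}
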
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PreviousBlockCompressionRatePredicator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PreviousBlockCompressionRatePredicator.java
index be0ee3bb9a7..c308874951e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PreviousBlockCompressionRatePredicator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PreviousBlockCompressionRatePredicator.java
@@ -50,7 +50,7 @@ public class PreviousBlockCompressionRatePredicator implements BlockCompressedSi
/**
* Returns true if the passed uncompressed size is larger than the limit calculated by
* updateLatestBlockSizes .
- * @param uncompressed true if the block should be finished. n
+   * @param uncompressed the uncompressed size of the block
+   * @return true if the block should be finished
*/
@Override
public boolean shouldFinishBlock(int uncompressed) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/UncompressedBlockSizePredicator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/UncompressedBlockSizePredicator.java
index c259375a97d..cf994ad51ce 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/UncompressedBlockSizePredicator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/UncompressedBlockSizePredicator.java
@@ -39,7 +39,7 @@ public class UncompressedBlockSizePredicator implements BlockCompressedSizePredi
/**
* Dummy implementation that always returns true. This means, we will be only considering the
* block uncompressed size for deciding when to finish a block.
- * @param uncompressed true if the block should be finished. n
+   * @param uncompressed the uncompressed size of the block
+   * @return true if the block should be finished
*/
@Override
public boolean shouldFinishBlock(int uncompressed) {
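
The two predicators above differ only in the limit they compare the uncompressed size against: the uncompressed variant uses the configured block size as-is, while the compression-rate variant scales it by the ratio observed on the previous block so the written block lands near the target size after compression. A minimal sketch of that idea, assuming a simple ratio update (class and method shapes are illustrative, not the HBase types):

final class CompressionRatePredicatorSketch {
    private final int configuredBlockSize;
    private double compressionRatio = 1.0;   // uncompressed / compressed of the last block

    CompressionRatePredicatorSketch(int configuredBlockSize) {
        this.configuredBlockSize = configuredBlockSize;
    }

    // Called after a block is written, with its final sizes.
    void updateLatestBlockSizes(int uncompressed, int compressed) {
        compressionRatio = compressed > 0 ? (double) uncompressed / compressed : 1.0;
    }

    // Finish the block once the uncompressed size exceeds the adjusted limit, so the block
    // ends up close to the configured size after compression.
    boolean shouldFinishBlock(int uncompressed) {
        return uncompressed >= configuredBlockSize * compressionRatio;
    }
}
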
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
index 54032e79c6f..0b03656d701 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
@@ -348,7 +348,7 @@ public final class BucketAllocator {
* @param availableSpace capacity of cache
* @param map A map stores the block key and BucketEntry(block's meta data like offset,
* length)
- * @param realCacheSize cached data size statistics for bucket cache n
+ * @param realCacheSize cached data size statistics for bucket cache
*/
BucketAllocator(long availableSpace, int[] bucketSizes, Map map,
LongAdder realCacheSize) throws BucketAllocatorException {
@@ -444,7 +444,8 @@ public final class BucketAllocator {
/**
* Allocate a block with specified size. Return the offset
- * @param blockSize size of block nn * @return the offset in the IOEngine
+ * @param blockSize size of block
+ * @return the offset in the IOEngine
*/
public synchronized long allocateBlock(int blockSize)
throws CacheFullException, BucketAllocatorException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 18295f285c4..6849b176f72 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -384,7 +384,8 @@ public class BucketCache implements BlockCache, HeapSize {
}
/**
- * Get the IOEngine from the IO engine name nnn * @return the IOEngine n
+ * Get the IOEngine from the IO engine name
+ * @return the IOEngine
*/
private IOEngine getIOEngineFromName(String ioEngineName, long capacity, String persistencePath)
throws IOException {
@@ -1581,7 +1582,7 @@ public class BucketCache implements BlockCache, HeapSize {
}
/**
- * Only used in test n
+ * Only used in test
*/
void stopWriterThreads() throws InterruptedException {
for (WriterThread writerThread : writerThreads) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java
index 78166e88ffd..6dc3742a660 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java
@@ -64,8 +64,8 @@ public class ByteBufferIOEngine implements IOEngine {
private final long capacity;
/**
- * Construct the ByteBufferIOEngine with the given capacity n * @throws IOException ideally here
- * no exception to be thrown from the allocator
+ * Construct the ByteBufferIOEngine with the given capacity
+ * @throws IOException ideally here no exception to be thrown from the allocator
*/
public ByteBufferIOEngine(long capacity) throws IOException {
this.capacity = capacity;
@@ -80,7 +80,7 @@ public class ByteBufferIOEngine implements IOEngine {
}
/**
- * Memory IO engine is always unable to support persistent storage for the cache n
+ * Memory IO engine is always unable to support persistent storage for the cache
*/
@Override
public boolean isPersistent() {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
index 511d8afff46..370343b1b25 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
@@ -110,7 +110,7 @@ public class FileIOEngine extends PersistentIOEngine {
}
/**
- * File IO engine is always able to support persistent storage for the cache n
+ * File IO engine is always able to support persistent storage for the cache
*/
@Override
public boolean isPersistent() {
@@ -162,7 +162,7 @@ public class FileIOEngine extends PersistentIOEngine {
/**
* Transfers data from the given byte buffer to file
* @param srcBuffer the given byte buffer from which bytes are to be read
- * @param offset The offset in the file where the first byte to be written n
+   * @param offset The offset in the file where the first byte is to be written
*/
@Override
public void write(ByteBuffer srcBuffer, long offset) throws IOException {
@@ -170,7 +170,7 @@ public class FileIOEngine extends PersistentIOEngine {
}
/**
- * Sync the data to file after writing n
+ * Sync the data to file after writing
*/
@Override
public void sync() throws IOException {
@@ -254,8 +254,8 @@ public class FileIOEngine extends PersistentIOEngine {
}
/**
- * Get the absolute offset in given file with the relative global offset. nn * @return the
- * absolute offset
+ * Get the absolute offset in given file with the relative global offset.
+ * @return the absolute offset
*/
private long getAbsoluteOffsetInFile(int fileNum, long globalOffset) {
return globalOffset - fileNum * sizePerFile;
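
getAbsoluteOffsetInFile above translates a global cache offset into a per-file offset for a cache striped across several equally sized files: the file is chosen by integer division, and the remainder is the position within that file. A self-contained sketch of the arithmetic:

final class StripedFileOffsetSketch {
    // Pick the backing file by integer division, then translate the global offset into an
    // offset relative to that file (globalOffset - fileNum * sizePerFile).
    static int fileNum(long globalOffset, long sizePerFile) {
        return (int) (globalOffset / sizePerFile);
    }

    static long offsetInFile(long globalOffset, long sizePerFile) {
        return globalOffset - fileNum(globalOffset, sizePerFile) * sizePerFile;
    }

    public static void main(String[] args) {
        long sizePerFile = 1024;
        long global = 2600;
        System.out.println(fileNum(global, sizePerFile));      // 2 -> third backing file
        System.out.println(offsetInFile(global, sizePerFile)); // 552 bytes into that file
    }
}
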
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java
index b09e0963ca2..b7066f149fd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java
@@ -95,7 +95,7 @@ public abstract class FileMmapIOEngine extends PersistentIOEngine {
}
/**
- * File IO engine is always able to support persistent storage for the cache n
+ * File IO engine is always able to support persistent storage for the cache
*/
@Override
public boolean isPersistent() {
@@ -109,7 +109,7 @@ public abstract class FileMmapIOEngine extends PersistentIOEngine {
/**
* Transfers data from the given byte buffer to file
* @param srcBuffer the given byte buffer from which bytes are to be read
- * @param offset The offset in the file where the first byte to be written n
+   * @param offset The offset in the file where the first byte is to be written
*/
@Override
public void write(ByteBuffer srcBuffer, long offset) throws IOException {
@@ -122,7 +122,7 @@ public abstract class FileMmapIOEngine extends PersistentIOEngine {
}
/**
- * Sync the data to file after writing n
+ * Sync the data to file after writing
*/
@Override
public void sync() throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java
index a7b73a5d886..46112da3f2c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java
@@ -52,19 +52,19 @@ public interface IOEngine {
/**
* Transfers data from the given byte buffer to IOEngine
* @param srcBuffer the given byte buffer from which bytes are to be read
- * @param offset The offset in the IO engine where the first byte to be written n
+   * @param offset The offset in the IO engine where the first byte is to be written
*/
void write(ByteBuffer srcBuffer, long offset) throws IOException;
/**
* Transfers the data from the given MultiByteBuffer to IOEngine
* @param srcBuffer the given MultiBytebufffers from which bytes are to be read
- * @param offset the offset in the IO engine where the first byte to be written n
+   * @param offset the offset in the IO engine where the first byte is to be written
*/
void write(ByteBuff srcBuffer, long offset) throws IOException;
/**
- * Sync the data to IOEngine after writing n
+ * Sync the data to IOEngine after writing
*/
void sync() throws IOException;
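
The write(srcBuffer, offset) plus sync() contract above maps naturally onto positional writes followed by a force of the channel. The sketch below uses a plain java.nio FileChannel to illustrate that contract; the real engines add striping, mmap, and buffer management on top:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

final class WriteAndSyncSketch {
    // Write the buffer at the given offset, then sync the data to the device.
    static void writeAt(Path file, ByteBuffer src, long offset) throws IOException {
        try (FileChannel ch = FileChannel.open(file,
                StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
            while (src.hasRemaining()) {
                offset += ch.write(src, offset);  // positional write, channel position unused
            }
            ch.force(false);                      // sync written data after the write
        }
    }
}
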
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
index 85bcc643558..bf014dfb530 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
@@ -77,7 +77,7 @@ public class MemorySizeUtil {
/**
* Checks whether we have enough heap memory left out after portion for Memstore and Block cache.
- * We need atleast 20% of heap left out for other RS functions. n
+   * We need at least 20% of the heap left for other RS functions.
*/
public static void checkForClusterFreeHeapMemoryLimit(Configuration conf) {
if (conf.get(MEMSTORE_SIZE_OLD_KEY) != null) {
@@ -102,7 +102,7 @@ public class MemorySizeUtil {
}
/**
- * Retrieve global memstore configured size as percentage of total heap. nn
+ * Retrieve global memstore configured size as percentage of total heap.
*/
public static float getGlobalMemStoreHeapPercent(final Configuration c,
final boolean logInvalid) {
@@ -178,7 +178,8 @@ public class MemorySizeUtil {
/**
* Returns the onheap global memstore limit based on the config
- * 'hbase.regionserver.global.memstore.size'. n * @return the onheap global memstore limt
+ * 'hbase.regionserver.global.memstore.size'.
+   * @return the onheap global memstore limit
*/
public static long getOnheapGlobalMemStoreSize(Configuration conf) {
long max = -1L;
@@ -191,7 +192,7 @@ public class MemorySizeUtil {
}
/**
- * Retrieve configured size for on heap block cache as percentage of total heap. n
+ * Retrieve configured size for on heap block cache as percentage of total heap.
*/
public static float getBlockCacheHeapPercent(final Configuration conf) {
// L1 block cache is always on heap
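
The checks above treat the memstore and block-cache sizes as fractions of the maximum heap and require that, together, they leave at least 20% of the heap free for everything else the RegionServer does. A minimal sketch of that budget check, with illustrative values rather than ones read from a Configuration:

final class HeapBudgetSketch {
    // The two fractions are percentages of max heap and together must leave 20% free.
    static void checkBudget(float memstorePercent, float blockCachePercent) {
        if (memstorePercent + blockCachePercent > 0.8f) {
            throw new IllegalStateException("Memstore + block cache must leave 20% of heap free");
        }
        long maxHeap = Runtime.getRuntime().maxMemory();
        long memstoreLimit = (long) (maxHeap * memstorePercent);
        long blockCacheLimit = (long) (maxHeap * blockCachePercent);
        System.out.println("memstore=" + memstoreLimit + " blockCache=" + blockCacheLimit);
    }

    public static void main(String[] args) {
        checkBudget(0.4f, 0.3f);   // leaves 30% of the heap free, passes the check
    }
}
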
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PriorityFunction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PriorityFunction.java
index ef19dea2dfb..2c9fb0b2a2e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PriorityFunction.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PriorityFunction.java
@@ -34,13 +34,15 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader
public interface PriorityFunction {
/**
* Returns the 'priority type' of the specified request. The returned value is mainly used to
- * select the dispatch queue. nnn * @return Priority of this request.
+ * select the dispatch queue.
+ * @return Priority of this request.
*/
int getPriority(RequestHeader header, Message param, User user);
/**
* Returns the deadline of the specified request. The returned value is used to sort the dispatch
- * queue. nn * @return Deadline of this request. 0 now, otherwise msec of 'delay'
+ * queue.
+ * @return Deadline of this request. 0 now, otherwise msec of 'delay'
*/
long getDeadline(RequestHeader header, Message param);
}
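
The two hooks above split request scheduling into queue selection (priority) and ordering within a queue (deadline, where 0 means run now and a positive value delays the call). A self-contained sketch of the idea, using plain strings instead of the protobuf RequestHeader and Message types:

final class PriorityFunctionSketch {
    static final int NORMAL_QOS = 0;
    static final int HIGH_QOS = 200;

    // Queue selection: system-table operations jump to the high-priority handlers.
    static int getPriority(String methodName, boolean isSystemTable) {
        return isSystemTable ? HIGH_QOS : NORMAL_QOS;
    }

    // Ordering within a queue: cheap calls run immediately (deadline 0), expensive ones are
    // pushed back proportionally so they cannot starve short requests.
    static long getDeadline(String methodName, long estimatedWorkMillis) {
        return "Scan".equals(methodName) ? estimatedWorkMillis : 0L;
    }
}
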
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java
index 59008cb08a4..197ddb71d7e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java
@@ -96,7 +96,7 @@ public interface RpcCall extends RpcCallContext {
/**
* Send the response of this RPC call. Implementation provides the underlying facility
- * (connection, etc) to send. n
+ * (connection, etc) to send.
*/
void sendResponseIfReady() throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java
index 95843652abe..479a83f914a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java
@@ -68,7 +68,7 @@ public interface RpcCallContext {
/**
* Sets a callback which has to be executed at the end of this RPC call. Such a callback is an
- * optional one for any Rpc call. n
+ * optional one for any Rpc call.
*/
void setCallBack(RpcCallback callback);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcSchedulerContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcSchedulerContext.java
index bab3e80d322..d8605bb122e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcSchedulerContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcSchedulerContext.java
@@ -25,8 +25,7 @@ class RpcSchedulerContext extends RpcScheduler.Context {
private final RpcServer rpcServer;
/**
- * n
- */
+   * Constructs a context for the given RpcServer.
+   */
RpcSchedulerContext(final RpcServer rpcServer) {
this.rpcServer = rpcServer;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 15caac476f3..6e4b5ef42f2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -255,11 +255,13 @@ public abstract class RpcServer implements RpcServerInterface, ConfigurationObse
/**
* Constructs a server listening on the named port and address.
- * @param server hosting instance of {@link Server}. We will do authentications if an
- * instance else pass null for no authentication check.
- * @param name Used keying this rpc servers' metrics and for naming the Listener thread.
- * @param services A list of services.
- * @param bindAddress Where to listen nn * @param reservoirEnabled Enable ByteBufferPool or not.
+ * @param server hosting instance of {@link Server}. We will do authentications if an
+ * instance else pass null for no authentication check.
+ * @param name Used keying this rpc servers' metrics and for naming the Listener
+ * thread.
+ * @param services A list of services.
+ * @param bindAddress Where to listen
+ * @param reservoirEnabled Enable ByteBufferPool or not.
*/
public RpcServer(final Server server, final String name,
final List services, final InetSocketAddress bindAddress,
@@ -776,7 +778,6 @@ public abstract class RpcServer implements RpcServerInterface, ConfigurationObse
/**
   * Returns the remote side IP address when invoked inside an RPC. Returns null in case of an error.
- * n
*/
public static InetAddress getRemoteIp() {
RpcCall call = CurCall.get();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java
index 80549067972..2c0dd1cc2b0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java
@@ -64,7 +64,7 @@ public interface RpcServerInterface {
void addCallSize(long diff);
/**
- * Refresh authentication manager policy. n
+ * Refresh authentication manager policy.
*/
void refreshAuthManager(Configuration conf, PolicyProvider pp);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
index efb6630ad9e..b09f33c47f9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
@@ -167,7 +167,7 @@ abstract class ServerRpcConnection implements Closeable {
}
/**
- * Set up cell block codecs n
+ * Set up cell block codecs
*/
private void setupCellBlockCodecs() throws FatalConnectionException {
// TODO: Plug in other supported decoders.
@@ -500,8 +500,8 @@ abstract class ServerRpcConnection implements Closeable {
protected abstract void doRespond(RpcResponse resp) throws IOException;
/**
- * n * Has the request header and the request param and optionally encoded data buffer all in this
- * one array.
+   * The passed buffer has the request header and the request param and optionally encoded data
+   * buffer all in this one array.
*
* Will be overridden in tests.
*/
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
index 78ff4bf69d1..92b38757031 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
@@ -57,10 +57,10 @@ public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObs
private Abortable abortable = null;
/**
- * n * @param handlerCount the number of handler threads that will be used to process calls
+ * @param handlerCount the number of handler threads that will be used to process calls
* @param priorityHandlerCount How many threads for priority handling.
- * @param replicationHandlerCount How many threads for replication handling. n * @param priority
- * Function to extract request priority.
+ * @param replicationHandlerCount How many threads for replication handling.
+ * @param priority Function to extract request priority.
*/
public SimpleRpcScheduler(Configuration conf, int handlerCount, int priorityHandlerCount,
int replicationHandlerCount, int metaTransitionHandler, PriorityFunction priority,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
index 49c861b14ff..cb157a592a9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
@@ -354,11 +354,13 @@ public class SimpleRpcServer extends RpcServer {
/**
* Constructs a server listening on the named port and address.
- * @param server hosting instance of {@link Server}. We will do authentications if an
- * instance else pass null for no authentication check.
- * @param name Used keying this rpc servers' metrics and for naming the Listener thread.
- * @param services A list of services.
- * @param bindAddress Where to listen nn * @param reservoirEnabled Enable ByteBufferPool or not.
+ * @param server hosting instance of {@link Server}. We will do authentications if an
+ * instance else pass null for no authentication check.
+ * @param name Used keying this rpc servers' metrics and for naming the Listener
+ * thread.
+ * @param services A list of services.
+ * @param bindAddress Where to listen
+ * @param reservoirEnabled Enable ByteBufferPool or not.
*/
public SimpleRpcServer(final Server server, final String name,
final List services, final InetSocketAddress bindAddress,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java
index db1b380361d..cbf023e2ba9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java
@@ -245,7 +245,7 @@ class SimpleRpcServerResponder extends Thread {
/**
* Process the response for this call. You need to have the lock on
* {@link org.apache.hadoop.hbase.ipc.SimpleServerRpcConnection#responseWriteLock}
- * @return true if we proceed the call fully, false otherwise. n
+   * @return true if we processed the call fully, false otherwise.
*/
private boolean processResponse(SimpleServerRpcConnection conn, RpcResponse resp)
throws IOException {
@@ -283,8 +283,8 @@ class SimpleRpcServerResponder extends Thread {
/**
* Process all the responses for this connection
- * @return true if all the calls were processed or that someone else is doing it. false if there *
- * is still some work to do. In this case, we expect the caller to delay us. n
+ * @return true if all the calls were processed or that someone else is doing it. false if there
+ * is still some work to do. In this case, we expect the caller to delay us.
*/
private boolean processAllResponses(final SimpleServerRpcConnection connection)
throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java
index 4c8925d7274..ac705d7a26f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java
@@ -260,7 +260,7 @@ class SimpleServerRpcConnection extends ServerRpcConnection {
/**
* Read off the wire. If there is not enough data to read, update the connection state with what
* we have and returns.
- * @return Returns -1 if failure (and caller will close connection), else zero or more. nn
+ * @return Returns -1 if failure (and caller will close connection), else zero or more.
*/
public int readAndProcess() throws IOException, InterruptedException {
// If we have not read the connection setup preamble, look to see if that is on the wire.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DrainingServerTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DrainingServerTracker.java
index 28795eab28e..41f5709e911 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DrainingServerTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DrainingServerTracker.java
@@ -64,7 +64,7 @@ public class DrainingServerTracker extends ZKListener {
/**
* Starts the tracking of draining RegionServers.
*
- * All Draining RSs will be tracked after this method is called. n
+ * All Draining RSs will be tracked after this method is called.
*/
public void start() throws KeeperException, IOException {
watcher.registerListener(this);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index c5c0a7cb7e5..7dc08d76aba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -3261,7 +3261,8 @@ public class HMaster extends HBaseServerBase implements Maste
}
/**
- * Utility for constructing an instance of the passed HMaster class. n * @return HMaster instance.
+ * Utility for constructing an instance of the passed HMaster class.
+ * @return HMaster instance.
*/
  public static HMaster constructMaster(Class<? extends HMaster> masterClass,
final Configuration conf) {
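
constructMaster above is a reflective factory: given a concrete subclass and a configuration, it looks up the matching constructor and invokes it. A generic sketch of the pattern with placeholder types (MyConfig and MyService are illustrative, not HBase classes):

import java.lang.reflect.Constructor;

final class ReflectiveConstructSketch {
    interface MyService {}
    static final class MyConfig {}

    // Look up the (MyConfig) constructor reflectively, invoke it, and wrap any failure.
    static <T extends MyService> T construct(Class<T> clazz, MyConfig conf) {
        try {
            Constructor<T> c = clazz.getConstructor(MyConfig.class);
            return c.newInstance(conf);
        } catch (ReflectiveOperationException e) {
            throw new RuntimeException("Failed constructing " + clazz.getName(), e);
        }
    }
}
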
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 6295fa63d50..493d0e3ef86 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -813,7 +813,7 @@ public class MasterCoprocessorHost
/**
* Invoked just before calling the split region procedure
* @param tableName the table where the region belongs to
- * @param splitRow the split point n
+ * @param splitRow the split point
*/
public void preSplitRegion(final TableName tableName, final byte[] splitRow) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
@@ -828,7 +828,7 @@ public class MasterCoprocessorHost
* Invoked just before a split
* @param tableName the table where the region belongs to
* @param splitRow the split point
- * @param user the user n
+ * @param user the user
*/
public void preSplitRegionAction(final TableName tableName, final byte[] splitRow,
final User user) throws IOException {
@@ -844,7 +844,7 @@ public class MasterCoprocessorHost
* Invoked just after a split
* @param regionInfoA the new left-hand daughter region
* @param regionInfoB the new right-hand daughter region
- * @param user the user n
+ * @param user the user
*/
public void postCompletedSplitRegionAction(final RegionInfo regionInfoA,
final RegionInfo regionInfoB, final User user) throws IOException {
@@ -857,8 +857,8 @@ public class MasterCoprocessorHost
}
/**
- * This will be called before update META step as part of split table region procedure. nn
- * * @param user the user n
+ * This will be called before update META step as part of split table region procedure.
+ * @param user the user
*/
public void preSplitBeforeMETAAction(final byte[] splitKey, final List metaEntries,
final User user) throws IOException {
@@ -872,7 +872,7 @@ public class MasterCoprocessorHost
/**
* This will be called after update META step as part of split table region procedure.
- * @param user the user n
+ * @param user the user
*/
public void preSplitAfterMETAAction(final User user) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
@@ -885,7 +885,7 @@ public class MasterCoprocessorHost
/**
* Invoked just after the rollback of a failed split
- * @param user the user n
+ * @param user the user
*/
public void postRollBackSplitRegionAction(final User user) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
@@ -899,7 +899,7 @@ public class MasterCoprocessorHost
/**
* Invoked just before a merge
* @param regionsToMerge the regions to merge
- * @param user the user n
+ * @param user the user
*/
public void preMergeRegionsAction(final RegionInfo[] regionsToMerge, final User user)
throws IOException {
@@ -915,7 +915,7 @@ public class MasterCoprocessorHost
* Invoked after completing merge regions operation
* @param regionsToMerge the regions to merge
* @param mergedRegion the new merged region
- * @param user the user n
+ * @param user the user
*/
public void postCompletedMergeRegionsAction(final RegionInfo[] regionsToMerge,
final RegionInfo mergedRegion, final User user) throws IOException {
@@ -931,7 +931,7 @@ public class MasterCoprocessorHost
* Invoked before merge regions operation writes the new region to hbase:meta
* @param regionsToMerge the regions to merge
* @param metaEntries the meta entry
- * @param user the user n
+ * @param user the user
*/
public void preMergeRegionsCommit(final RegionInfo[] regionsToMerge,
final @MetaMutationAnnotation List metaEntries, final User user) throws IOException {
@@ -947,7 +947,7 @@ public class MasterCoprocessorHost
* Invoked after merge regions operation writes the new region to hbase:meta
* @param regionsToMerge the regions to merge
* @param mergedRegion the new merged region
- * @param user the user n
+ * @param user the user
*/
public void postMergeRegionsCommit(final RegionInfo[] regionsToMerge,
final RegionInfo mergedRegion, final User user) throws IOException {
@@ -962,7 +962,7 @@ public class MasterCoprocessorHost
/**
* Invoked after rollback merge regions operation
* @param regionsToMerge the regions to merge
- * @param user the user n
+ * @param user the user
*/
public void postRollBackMergeRegionsAction(final RegionInfo[] regionsToMerge, final User user)
throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index a5a0b5d629f..5a43cd98feb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -306,7 +306,7 @@ public class MasterFileSystem {
}
/**
- * Make sure the directories under rootDir have good permissions. Create if necessary. nn
+ * Make sure the directories under rootDir have good permissions. Create if necessary.
*/
private void checkSubDir(final Path p, final String dirPermsConfName) throws IOException {
FileSystem fs = p.getFileSystem(conf);
@@ -335,7 +335,7 @@ public class MasterFileSystem {
/**
* Check permissions for bulk load staging directory. This directory has special hidden
- * permissions. Create it if necessary. n
+ * permissions. Create it if necessary.
*/
private void checkStagingDir() throws IOException {
Path p = new Path(this.rootdir, HConstants.BULKLOAD_STAGING_DIR_NAME);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 1ac42008df1..4a490b1e127 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -1110,7 +1110,7 @@ public class MasterRpcServices extends HBaseRpcServicesBase
/**
* Get the number of regions of the table that have been updated by the alter.
   * @return Pair indicating the number of regions updated: Pair.getFirst is the regions that are yet
- * to be updated Pair.getSecond is the total number of regions of the table n
+   * to be updated; Pair.getSecond is the total number of regions of the table
*/
@Override
public GetSchemaAlterStatusResponse getSchemaAlterStatus(RpcController controller,
@@ -1137,7 +1137,7 @@ public class MasterRpcServices extends HBaseRpcServicesBase
* Get list of TableDescriptors for requested tables.
* @param c Unused (set to null).
* @param req GetTableDescriptorsRequest that contains: - tableNames: requested tables, or if
- * empty, all are requested. nn
+ * empty, all are requested.
*/
@Override
public GetTableDescriptorsResponse getTableDescriptors(RpcController c,
@@ -1174,7 +1174,7 @@ public class MasterRpcServices extends HBaseRpcServicesBase
/**
* Get list of userspace table names
* @param controller Unused (set to null).
- * @param req GetTableNamesRequest nn
+ * @param req GetTableNamesRequest
*/
@Override
public GetTableNamesResponse getTableNames(RpcController controller, GetTableNamesRequest req)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index fbeb155a88b..c84a58b7771 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -116,7 +116,7 @@ public interface MasterServices extends Server {
/**
* Check table is modifiable; i.e. exists and is offline.
- * @param tableName Name of table to check. nnn
+ * @param tableName Name of table to check.
*/
// We actually throw the exceptions mentioned in the
void checkTableModifiable(final TableName tableName)
@@ -125,8 +125,8 @@ public interface MasterServices extends Server {
/**
* Create a table using the given table definition.
* @param desc The table definition
- * @param splitKeys Starting row keys for the initial table regions. If null nn * a single region
- * is created.
+ * @param splitKeys Starting row keys for the initial table regions. If null a single region is
+ * created.
*/
long createTable(final TableDescriptor desc, final byte[][] splitKeys, final long nonceGroup,
final long nonce) throws IOException;
@@ -139,7 +139,7 @@ public interface MasterServices extends Server {
/**
* Delete a table
- * @param tableName The table name nnn
+ * @param tableName The table name
*/
long deleteTable(final TableName tableName, final long nonceGroup, final long nonce)
throws IOException;
@@ -147,7 +147,7 @@ public interface MasterServices extends Server {
/**
* Truncate a table
* @param tableName The table name
- * @param preserveSplits True if the splits should be preserved nnn
+ * @param preserveSplits True if the splits should be preserved
*/
public long truncateTable(final TableName tableName, final boolean preserveSplits,
final long nonceGroup, final long nonce) throws IOException;
@@ -155,7 +155,7 @@ public interface MasterServices extends Server {
/**
* Modify the descriptor of an existing table
* @param tableName The table name
- * @param descriptor The updated table descriptor nnn
+ * @param descriptor The updated table descriptor
*/
long modifyTable(final TableName tableName, final TableDescriptor descriptor,
final long nonceGroup, final long nonce) throws IOException;
@@ -168,14 +168,14 @@ public interface MasterServices extends Server {
/**
* Enable an existing table
- * @param tableName The table name nnn
+ * @param tableName The table name
*/
long enableTable(final TableName tableName, final long nonceGroup, final long nonce)
throws IOException;
/**
* Disable an existing table
- * @param tableName The table name nnn
+ * @param tableName The table name
*/
long disableTable(final TableName tableName, final long nonceGroup, final long nonce)
throws IOException;
@@ -183,7 +183,7 @@ public interface MasterServices extends Server {
/**
* Add a new column to an existing table
* @param tableName The table name
- * @param column The column definition nnn
+ * @param column The column definition
*/
long addColumn(final TableName tableName, final ColumnFamilyDescriptor column,
final long nonceGroup, final long nonce) throws IOException;
@@ -191,7 +191,7 @@ public interface MasterServices extends Server {
/**
* Modify the column descriptor of an existing column in an existing table
* @param tableName The table name
- * @param descriptor The updated column definition nnn
+ * @param descriptor The updated column definition
*/
long modifyColumn(final TableName tableName, final ColumnFamilyDescriptor descriptor,
final long nonceGroup, final long nonce) throws IOException;
@@ -205,7 +205,7 @@ public interface MasterServices extends Server {
/**
* Delete a column from an existing table
* @param tableName The table name
- * @param columnName The column name nnn
+ * @param columnName The column name
*/
long deleteColumn(final TableName tableName, final byte[] columnName, final long nonceGroup,
final long nonce) throws IOException;
@@ -216,7 +216,7 @@ public interface MasterServices extends Server {
* @param forcible whether to force to merge even two regions are not adjacent
* @param nonceGroup used to detect duplicate
* @param nonce used to detect duplicate
- * @return procedure Id n
+ * @return procedure Id
*/
long mergeRegions(final RegionInfo[] regionsToMerge, final boolean forcible,
final long nonceGroup, final long nonce) throws IOException;
@@ -227,7 +227,7 @@ public interface MasterServices extends Server {
* @param splitRow split point
* @param nonceGroup used to detect duplicate
* @param nonce used to detect duplicate
- * @return procedure Id n
+ * @return procedure Id
*/
long splitRegion(final RegionInfo regionInfo, final byte[] splitRow, final long nonceGroup,
final long nonce) throws IOException;
@@ -271,46 +271,46 @@ public interface MasterServices extends Server {
* Abort a procedure.
* @param procId ID of the procedure
* @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
- * @return true if aborted, false if procedure already completed or does not exist n
+ * @return true if aborted, false if procedure already completed or does not exist
*/
public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning)
throws IOException;
/**
* Get procedures
- * @return procedure list n
+ * @return procedure list
*/
public List> getProcedures() throws IOException;
/**
* Get locks
- * @return lock list n
+ * @return lock list
*/
public List getLocks() throws IOException;
/**
* Get list of table descriptors by namespace
- * @param name namespace name nn
+ * @param name namespace name
*/
public List listTableDescriptorsByNamespace(String name) throws IOException;
/**
* Get list of table names by namespace
* @param name namespace name
- * @return table names n
+ * @return table names
*/
public List listTableNamesByNamespace(String name) throws IOException;
/**
* @param table the table for which last successful major compaction time is queried
* @return the timestamp of the last successful major compaction for the passed table, or 0 if no
- * HFile resulting from a major compaction exists n
+ * HFile resulting from a major compaction exists
*/
public long getLastMajorCompactionTimestamp(TableName table) throws IOException;
/**
- * n * @return the timestamp of the last successful major compaction for the passed region or 0 if
- * no HFile resulting from a major compaction exists n
+ * Returns the timestamp of the last successful major compaction for the passed region or 0 if no
+ * HFile resulting from a major compaction exists
*/
public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException;
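Every DDL method in the MasterServices interface above carries a nonceGroup/nonce pair so a retried RPC can be recognized as a duplicate instead of being re-run. A minimal sketch of driving two of these calls, assuming the caller generates its own nonce values (the random generation and the helper class name are illustrative, not the real HBase nonce generator):

    import java.io.IOException;
    import java.util.concurrent.ThreadLocalRandom;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.master.MasterServices;

    public final class TableDdlSketch {
      /**
       * Disable and then delete a table, using one nonceGroup for the caller and a
       * fresh nonce per logical operation. A real caller would wait for the disable
       * procedure to finish before issuing the delete; this sketch omits that.
       */
      static void dropTable(MasterServices master, TableName tableName) throws IOException {
        long nonceGroup = ThreadLocalRandom.current().nextLong();

        long disableProcId =
          master.disableTable(tableName, nonceGroup, ThreadLocalRandom.current().nextLong());
        long deleteProcId =
          master.deleteTable(tableName, nonceGroup, ThreadLocalRandom.current().nextLong());

        System.out.println("disable procId=" + disableProcId + ", delete procId=" + deleteProcId);
      }

      private TableDdlSketch() {
      }
    }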
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManager.java
index eaf335e52ef..43407f447f1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManager.java
@@ -55,7 +55,7 @@ public class MetricsAssignmentManager {
}
/**
- * set new value for number of regions in transition. n
+ * set new value for number of regions in transition.
*/
public void updateRITCount(final int ritCount) {
assignmentManagerSource.setRIT(ritCount);
@@ -63,21 +63,21 @@ public class MetricsAssignmentManager {
/**
* update RIT count that are in this state for more than the threshold as defined by the property
- * rit.metrics.threshold.time. n
+ * rit.metrics.threshold.time.
*/
public void updateRITCountOverThreshold(final int ritCountOverThreshold) {
assignmentManagerSource.setRITCountOverThreshold(ritCountOverThreshold);
}
/**
- * update the timestamp for oldest region in transition metrics. n
+ * update the timestamp for oldest region in transition metrics.
*/
public void updateRITOldestAge(final long timestamp) {
assignmentManagerSource.setRITOldestAge(timestamp);
}
/**
- * update the duration metrics of region is transition n
+ * update the duration metrics of a region in transition
*/
public void updateRitDuration(long duration) {
assignmentManagerSource.updateRitDuration(duration);
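The four setters above simply push the current region-in-transition (RIT) numbers into the metrics source. A small sketch of publishing one round of values, assuming the caller has already computed them (the helper class is hypothetical):

    import org.apache.hadoop.hbase.master.MetricsAssignmentManager;

    public final class RitMetricsSketch {
      /** Publish one round of region-in-transition metrics. */
      static void publish(MetricsAssignmentManager metrics, int ritCount, int ritOverThreshold,
        long oldestRitTimestamp, long lastRitDurationMs) {
        metrics.updateRITCount(ritCount);                      // total regions in transition
        metrics.updateRITCountOverThreshold(ritOverThreshold); // RITs older than rit.metrics.threshold.time
        metrics.updateRITOldestAge(oldestRitTimestamp);        // timestamp of the oldest RIT
        metrics.updateRitDuration(lastRitDurationMs);          // duration of a finished transition
      }

      private RitMetricsSketch() {
      }
    }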
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
index 39d21e32032..854c21da2bc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
@@ -174,10 +174,11 @@ public class RegionPlacementMaintainer implements Closeable {
}
/**
- * Generate the assignment plan for the existing table nnnn * @param
- * munkresForSecondaryAndTertiary if set on true the assignment plan for the tertiary and
- * secondary will be generated with Munkres algorithm, otherwise will be generated using
- * placeSecondaryAndTertiaryRS n
+ * Generate the assignment plan for the existing table
+ * @param munkresForSecondaryAndTertiary if set to true the assignment plan for the tertiary and
+ * secondary will be generated with Munkres algorithm,
+ * otherwise will be generated using
+ * placeSecondaryAndTertiaryRS
*/
private void genAssignmentPlan(TableName tableName,
SnapshotOfRegionAssignmentFromMeta assignmentSnapshot,
@@ -579,7 +580,7 @@ public class RegionPlacementMaintainer implements Closeable {
}
/**
- * Print the assignment plan to the system output stream n
+ * Print the assignment plan to the system output stream
*/
public static void printAssignmentPlan(FavoredNodesPlan plan) {
if (plan == null) return;
@@ -622,7 +623,7 @@ public class RegionPlacementMaintainer implements Closeable {
}
/**
- * Update the assignment plan to all the region servers nn
+ * Update the assignment plan to all the region servers
*/
private void updateAssignmentPlanToRegionServers(FavoredNodesPlan plan) throws IOException {
LOG.info("Start to update the region servers with the new assignment plan");
@@ -737,7 +738,7 @@ public class RegionPlacementMaintainer implements Closeable {
* as a string) also prints the baseline locality
* @param movesPerTable - how many primary regions will move per table
* @param regionLocalityMap - locality map from FS
- * @param newPlan - new assignment plan n
+ * @param newPlan - new assignment plan
*/
public void checkDifferencesWithOldPlan(Map movesPerTable,
Map> regionLocalityMap, FavoredNodesPlan newPlan)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 1bf3c73d59a..ed28db78de7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -353,8 +353,8 @@ public class ServerManager {
* Checks if the clock skew between the server and the master. If the clock skew exceeds the
* configured max, it will throw an exception; if it exceeds the configured warning threshold, it
* will log a warning but start normally.
- * @param serverName Incoming servers's name n * @throws ClockOutOfSyncException if the skew
- * exceeds the configured max value
+ * @param serverName Incoming server's name
+ * @throws ClockOutOfSyncException if the skew exceeds the configured max value
*/
private void checkClockSkew(final ServerName serverName, final long serverCurrentTime)
throws ClockOutOfSyncException {
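checkClockSkew() compares the reporting server's clock with the master's: skew beyond a hard maximum rejects the server, skew beyond a warning threshold only logs. A standalone sketch of that logic; the threshold constants and the use of IllegalStateException instead of ClockOutOfSyncException are placeholders, not HBase values:

    /** Standalone illustration of the clock-skew check described above. */
    public final class ClockSkewCheckSketch {
      private static final long MAX_SKEW_MS = 30_000L;  // placeholder, not the HBase default
      private static final long WARN_SKEW_MS = 10_000L; // placeholder, not the HBase default

      static void checkClockSkew(String serverName, long serverCurrentTimeMs) {
        long skew = Math.abs(System.currentTimeMillis() - serverCurrentTimeMs);
        if (skew > MAX_SKEW_MS) {
          throw new IllegalStateException("Server " + serverName + " clock is off by " + skew
            + "ms, more than the allowed maximum of " + MAX_SKEW_MS + "ms");
        } else if (skew > WARN_SKEW_MS) {
          System.err.println("WARN: server " + serverName + " clock is off by " + skew + "ms");
        }
      }

      private ClockSkewCheckSketch() {
      }
    }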
@@ -448,9 +448,7 @@ public class ServerManager {
return builder.build();
}
- /**
- * n * @return ServerMetrics if serverName is known else null
- */
+ /** Returns ServerMetrics if serverName is known else null */
public ServerMetrics getLoad(final ServerName serverName) {
return this.onlineServers.get(serverName);
}
@@ -656,8 +654,8 @@ public class ServerManager {
}
/**
- * Add the server to the drain list. n * @return True if the server is added or the server is
- * already on the drain list.
+ * Add the server to the drain list.
+ * @return True if the server is added or the server is already on the drain list.
*/
public synchronized boolean addServerToDrainList(final ServerName sn) {
// Warn if the server (sn) is not online. ServerName is of the form:
@@ -744,7 +742,7 @@ public class ServerManager {
* the master is stopped - the 'hbase.master.wait.on.regionservers.maxtostart' number of region
* servers is reached - the 'hbase.master.wait.on.regionservers.mintostart' is reached AND there
* have been no new region server in for 'hbase.master.wait.on.regionservers.interval' time AND
- * the 'hbase.master.wait.on.regionservers.timeout' is reached n
+ * the 'hbase.master.wait.on.regionservers.timeout' is reached
*/
public void waitForRegionServers(MonitoredTask status) throws InterruptedException {
final long interval =
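The wait loop above is governed by the hbase.master.wait.on.regionservers.* properties named in the Javadoc. A hedged sketch of setting them on a Configuration; the numeric values are examples only, not the shipped defaults:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class WaitOnRegionServersConfigSketch {
      static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.master.wait.on.regionservers.mintostart", 1);   // wait for at least this many
        conf.setInt("hbase.master.wait.on.regionservers.maxtostart", 10);  // stop waiting once this many checked in
        conf.setLong("hbase.master.wait.on.regionservers.interval", 1500); // ms without a new server checking in
        conf.setLong("hbase.master.wait.on.regionservers.timeout", 4500);  // overall ceiling on the wait, in ms
        return conf;
      }

      private WaitOnRegionServersConfigSketch() {
      }
    }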
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
index b1f067bfd82..8c91c58dbbe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
@@ -110,7 +110,7 @@ public class SplitLogManager {
* Its OK to construct this object even when region-servers are not online. It does lookup the
* orphan tasks in coordination engine but it doesn't block waiting for them to be done.
* @param master the master services
- * @param conf the HBase configuration n
+ * @param conf the HBase configuration
*/
public SplitLogManager(MasterServices master, Configuration conf) throws IOException {
this.server = master;
@@ -180,7 +180,7 @@ public class SplitLogManager {
/**
* @param logDir one region sever wal dir path in .logs
* @throws IOException if there was an error while splitting any log file
- * @return cumulative size of the logfiles split n
+ * @return cumulative size of the logfiles split
*/
public long splitLogDistributed(final Path logDir) throws IOException {
List logDirs = new ArrayList<>();
@@ -377,9 +377,7 @@ public class SplitLogManager {
}
- /**
- * nn * @return null on success, existing task on error
- */
+ /** Returns null on success, existing task on error */
private Task createTaskIfAbsent(String path, TaskBatch batch) {
Task oldtask;
// batch.installed is only changed via this function and
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index 1fe43aba7da..c0b47b0bc24 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -714,7 +714,7 @@ public class MergeTableRegionsProcedure
/**
* The procedure could be restarted from a different machine. If the variable is null, we need to
* retrieve it.
- * @param env MasterProcedureEnv n
+ * @param env MasterProcedureEnv
*/
private ServerName getServerName(final MasterProcedureEnv env) {
if (regionLocation == null) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
index 93e2cf0bc34..6394f1d6ce6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
@@ -385,7 +385,7 @@ public class CloneSnapshotProcedure extends AbstractStateMachineTableProcedure createFilesystemLayout(final MasterProcedureEnv env,
final TableDescriptor tableDescriptor, final List newRegions) throws IOException {
@@ -480,7 +480,7 @@ public class CloneSnapshotProcedure extends AbstractStateMachineTableProcedure createFsLayout(final MasterProcedureEnv env,
final TableDescriptor tableDescriptor, List newRegions,
@@ -509,7 +509,7 @@ public class CloneSnapshotProcedure extends AbstractStateMachineTableProcedure
- * Exposed for TESTING n * @param handler handler the master should use TODO get rid of this if
- * possible, repackaging, modify tests.
+ * Exposed for TESTING.
+ * @param handler handler the master should use. TODO: get rid of this if possible, repackaging,
+ * modify tests.
*/
public synchronized void setSnapshotHandlerForTesting(final TableName tableName,
final SnapshotSentinel handler) {
@@ -857,7 +858,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
* @param snapshot Snapshot Descriptor
* @param snapshotTableDesc Table Descriptor
* @param nonceKey unique identifier to prevent duplicated RPC
- * @return procId the ID of the clone snapshot procedure n
+ * @return procId the ID of the clone snapshot procedure
*/
private long cloneSnapshot(final SnapshotDescription reqSnapshot, final TableName tableName,
final SnapshotDescription snapshot, final TableDescriptor snapshotTableDesc,
@@ -924,8 +925,8 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
}
/**
- * Restore or Clone the specified snapshot n * @param nonceKey unique identifier to prevent
- * duplicated RPC n
+ * Restore or Clone the specified snapshot
+ * @param nonceKey unique identifier to prevent duplicated RPC
*/
public long restoreOrCloneSnapshot(final SnapshotDescription reqSnapshot, final NonceKey nonceKey,
final boolean restoreAcl, String customSFT) throws IOException {
@@ -977,7 +978,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
* @param snapshotTableDesc Table Descriptor
* @param nonceKey unique identifier to prevent duplicated RPC
* @param restoreAcl true to restore acl of snapshot
- * @return procId the ID of the restore snapshot procedure n
+ * @return procId the ID of the restore snapshot procedure
*/
private long restoreSnapshot(final SnapshotDescription reqSnapshot, final TableName tableName,
final SnapshotDescription snapshot, final TableDescriptor snapshotTableDesc,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
index 6df17e58e22..e7b0f826082 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
@@ -168,7 +168,7 @@ public class DefaultMobStoreFlusher extends DefaultStoreFlusher {
* @param writer The store file writer.
* @param status Task that represents the flush operation and may be updated with
* status.
- * @param throughputController A controller to avoid flush too fast. n
+ * @param throughputController A controller to avoid flushing too fast.
*/
protected void performMobFlush(MemStoreSnapshot snapshot, long cacheFlushId,
InternalScanner scanner, StoreFileWriter writer, MonitoredTask status,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java
index 755f7bb4c3f..3293208771a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java
@@ -49,7 +49,7 @@ public class MobFile {
/**
* Internal use only. This is used by the sweeper.
- * @return The store file scanner. n
+ * @return The store file scanner.
*/
public StoreFileScanner getScanner() throws IOException {
List sfs = new ArrayList<>();
@@ -64,7 +64,7 @@ public class MobFile {
* Reads a cell from the mob file.
* @param search The cell need to be searched in the mob file.
* @param cacheMobBlocks Should this scanner cache blocks.
- * @return The cell in the mob file. n
+ * @return The cell in the mob file.
*/
public MobCell readCell(Cell search, boolean cacheMobBlocks) throws IOException {
return readCell(search, cacheMobBlocks, sf.getMaxMemStoreTS());
@@ -75,7 +75,7 @@ public class MobFile {
* @param search The cell need to be searched in the mob file.
* @param cacheMobBlocks Should this scanner cache blocks.
* @param readPt the read point.
- * @return The cell in the mob file. n
+ * @return The cell in the mob file.
*/
public MobCell readCell(Cell search, boolean cacheMobBlocks, long readPt) throws IOException {
StoreFileScanner scanner = null;
@@ -108,7 +108,7 @@ public class MobFile {
}
/**
- * Opens the underlying reader. It's not thread-safe. Use MobFileCache.openFile() instead. n
+ * Opens the underlying reader. It's not thread-safe. Use MobFileCache.openFile() instead.
*/
public void open() throws IOException {
sf.initReader();
@@ -116,7 +116,7 @@ public class MobFile {
/**
* Closes the underlying reader, but do no evict blocks belonging to this file. It's not
- * thread-safe. Use MobFileCache.closeFile() instead. n
+ * thread-safe. Use MobFileCache.closeFile() instead.
*/
public void close() throws IOException {
if (sf != null) {
@@ -131,7 +131,7 @@ public class MobFile {
* @param path The path of the underlying StoreFile.
* @param conf The configuration.
* @param cacheConf The CacheConfig.
- * @return An instance of the MobFile. n
+ * @return An instance of the MobFile.
*/
public static MobFile create(FileSystem fs, Path path, Configuration conf, CacheConfig cacheConf)
throws IOException {
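The MobFile methods above form a small lifecycle: create a MobFile over a store file path, open its reader, read a cell, then close it. A minimal usage sketch built from the signatures shown here; the package locations of MobFile/MobCell and the KeyValue used as the search key are assumptions for illustration:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.mob.MobCell;
    import org.apache.hadoop.hbase.mob.MobFile;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MobFileReadSketch {
      static void readOneCell(FileSystem fs, Path mobFilePath, Configuration conf,
        CacheConfig cacheConf) throws IOException {
        MobFile mobFile = MobFile.create(fs, mobFilePath, conf, cacheConf);
        mobFile.open(); // not thread-safe; production code goes through MobFileCache.openFile()
        try {
          Cell search = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"), Bytes.toBytes("q"));
          MobCell cell = mobFile.readCell(search, /* cacheMobBlocks= */ false);
          System.out.println(cell != null ? "found a cell" : "no cell found");
        } finally {
          mobFile.close(); // closes the reader without evicting cached blocks
        }
      }

      private MobFileReadSketch() {
      }
    }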
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java
index dc2bf5c14e3..ed1803cb38d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java
@@ -195,7 +195,7 @@ public class MobFileCache {
* @param fs The current file system.
* @param path The file path.
* @param cacheConf The current MobCacheConfig
+ * @return An opened mob file.
+ * @return A opened mob file.
*/
public MobFile openFile(FileSystem fs, Path path, CacheConfig cacheConf) throws IOException {
if (!isCacheEnabled) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileName.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileName.java
index d65d96cc64c..8f163cf5cc2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileName.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileName.java
@@ -51,8 +51,8 @@ public final class MobFileName {
public static final String REGION_SEP = "_";
/**
- * n * The start key. n * The string of the latest timestamp of cells in this file, the format is
- * yyyymmdd. n * The uuid
+ * The start key, the string of the latest timestamp of cells in this file (the format is
+ * yyyymmdd), and the uuid.
* @param regionName name of a region, where this file was created during flush or compaction.
*/
private MobFileName(byte[] startKey, String date, String uuid, String regionName) {
@@ -64,8 +64,8 @@ public final class MobFileName {
}
/**
- * n * The md5 hex string of the start key. n * The string of the latest timestamp of cells in
- * this file, the format is yyyymmdd. n * The uuid
+ * The md5 hex string of the start key, the string of the latest timestamp of cells in this file
+ * (the format is yyyymmdd), and the uuid.
* @param regionName name of a region, where this file was created during flush or compaction.
*/
private MobFileName(String startKey, String date, String uuid, String regionName) {
@@ -77,8 +77,8 @@ public final class MobFileName {
}
/**
- * Creates an instance of MobFileName n * The md5 hex string of the start key. n * The string of
- * the latest timestamp of cells in this file, the format is yyyymmdd.
+ * Creates an instance of MobFileName from the md5 hex string of the start key and the string of
+ * the latest timestamp of cells in this file (the format is yyyymmdd).
* @param uuid The uuid.
* @param regionName name of a region, where this file was created during flush or compaction.
* @return An instance of a MobFileName.
@@ -88,8 +88,8 @@ public final class MobFileName {
}
/**
- * Creates an instance of MobFileName n * The md5 hex string of the start key. n * The string of
- * the latest timestamp of cells in this file, the format is yyyymmdd.
+ * Creates an instance of MobFileName from the md5 hex string of the start key and the string of
+ * the latest timestamp of cells in this file (the format is yyyymmdd).
* @param uuid The uuid.
* @param regionName name of a region, where this file was created during flush or compaction.
* @return An instance of a MobFileName.
@@ -120,7 +120,7 @@ public final class MobFileName {
/**
* get startKey from MobFileName.
- * @param fileName file name. n
+ * @param fileName file name.
*/
public static String getStartKeyFromName(final String fileName) {
return fileName.substring(0, STARTKEY_END_INDEX);
@@ -128,7 +128,7 @@ public final class MobFileName {
/**
* get date from MobFileName.
- * @param fileName file name. n
+ * @param fileName file name.
*/
public static String getDateFromName(final String fileName) {
return fileName.substring(STARTKEY_END_INDEX, DATE_END_INDEX);
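A mob file name is the MD5 hex of the start key, then the yyyymmdd date of the latest cell timestamp, then a uuid, then REGION_SEP and the region name; getStartKeyFromName and getDateFromName slice the fixed-width prefixes back out. A standalone sketch of that encoding, assuming the conventional widths of 32 hex characters for the MD5 and 8 for the date (the real offsets are the STARTKEY_END_INDEX and DATE_END_INDEX constants):

    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;
    import java.text.SimpleDateFormat;
    import java.util.Date;
    import java.util.UUID;

    public final class MobFileNameSketch {
      private static final int STARTKEY_END_INDEX = 32; // MD5 hex width (assumed)
      private static final int DATE_END_INDEX = 40;     // 32 + 8-character yyyymmdd date (assumed)

      static String build(byte[] startKey, Date latestCellDate, String regionName)
        throws NoSuchAlgorithmException {
        StringBuilder md5Hex = new StringBuilder();
        for (byte b : MessageDigest.getInstance("MD5").digest(startKey)) {
          md5Hex.append(String.format("%02x", b & 0xff));
        }
        String date = new SimpleDateFormat("yyyyMMdd").format(latestCellDate);
        String uuid = UUID.randomUUID().toString().replace("-", "");
        return md5Hex.toString() + date + uuid + "_" + regionName; // "_" mirrors REGION_SEP
      }

      static String startKeyOf(String fileName) {
        return fileName.substring(0, STARTKEY_END_INDEX);
      }

      static String dateOf(String fileName) {
        return fileName.substring(STARTKEY_END_INDEX, DATE_END_INDEX);
      }

      private MobFileNameSketch() {
      }
    }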
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
index 43cf4255235..e04d67a0aaa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
@@ -435,7 +435,8 @@ public final class MobUtils {
/**
* Gets the RegionInfo of the mob files. This is a dummy region. The mob files are not saved in a
- * region in HBase. It's internally used only. n * @return A dummy mob region info.
+ * region in HBase. It's internally used only.
+ * @return A dummy mob region info.
*/
public static RegionInfo getMobRegionInfo(TableName tableName) {
return RegionInfoBuilder.newBuilder(tableName).setStartKey(MobConstants.MOB_REGION_NAME_BYTES)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/ThreadMonitoring.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/ThreadMonitoring.java
index c08a462ed0b..321fb7e5c21 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/ThreadMonitoring.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/ThreadMonitoring.java
@@ -44,7 +44,7 @@ public abstract class ThreadMonitoring {
}
/**
- * Print all of the thread's information and stack traces. nnn
+ * Print all of the thread's information and stack traces.
*/
public static void appendThreadInfo(StringBuilder sb, ThreadInfo info, String indent) {
boolean contention = threadBean.isThreadContentionMonitoringEnabled();
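appendThreadInfo() renders one ThreadInfo (name, state, stack trace, and optionally contention figures) into a StringBuilder. A standalone sketch of the same idea using only the JDK's ThreadMXBean:

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadInfo;
    import java.lang.management.ThreadMXBean;

    public final class ThreadDumpSketch {
      /** Append a one-line header and the stack trace for every live thread. */
      static void appendAllThreadInfo(StringBuilder sb, String indent) {
        ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
        for (ThreadInfo info : threadBean.dumpAllThreads(false, false)) {
          if (info == null) {
            continue;
          }
          sb.append(indent).append('"').append(info.getThreadName()).append("\" id=")
            .append(info.getThreadId()).append(' ').append(info.getThreadState()).append('\n');
          for (StackTraceElement frame : info.getStackTrace()) {
            sb.append(indent).append("    at ").append(frame).append('\n');
          }
        }
      }

      public static void main(String[] args) {
        StringBuilder sb = new StringBuilder();
        appendAllThreadInfo(sb, "  ");
        System.out.print(sb);
      }

      private ThreadDumpSketch() {
      }
    }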
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java
index e54ea3febde..d95378f9b86 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java
@@ -70,8 +70,8 @@ class NamespaceStateManager {
}
/**
- * Check if adding a region violates namespace quota, if not update namespace cache. nnn * @return
- * true, if region can be added to table.
+ * Check if adding a region violates namespace quota, if not update namespace cache.
+ * @return true, if region can be added to table.
* @throws IOException Signals that an I/O exception has occurred.
*/
synchronized boolean checkAndUpdateNamespaceRegionCount(TableName name, byte[] regionName,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java
index 91bceacae09..3fe09e848d3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java
@@ -53,14 +53,14 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDe
public abstract class MasterProcedureManager extends ProcedureManager implements Stoppable {
/**
* Initialize a globally barriered procedure for master.
- * @param master Master service interface nnn
+ * @param master Master service interface
*/
public abstract void initialize(MasterServices master, MetricsMaster metricsMaster)
throws KeeperException, IOException, UnsupportedOperationException;
/**
* Execute a distributed procedure on cluster
- * @param desc Procedure description n
+ * @param desc Procedure description
*/
public void execProcedure(ProcedureDescription desc) throws IOException {
}
@@ -68,7 +68,7 @@ public abstract class MasterProcedureManager extends ProcedureManager implements
/**
* Execute a distributed procedure on cluster with return data.
* @param desc Procedure description
- * @return data returned from the procedure execution, null if no data n
+ * @return data returned from the procedure execution, null if no data
*/
public byte[] execProcedureWithRet(ProcedureDescription desc) throws IOException {
return null;
@@ -84,7 +84,7 @@ public abstract class MasterProcedureManager extends ProcedureManager implements
/**
* Check if the procedure is finished successfully
* @param desc Procedure description
- * @return true if the specified procedure is finished successfully n
+ * @return true if the specified procedure is finished successfully
*/
public abstract boolean isProcedureDone(ProcedureDescription desc) throws IOException;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java
index 8eb477fb846..d823fac1aa3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java
@@ -234,7 +234,7 @@ public class Procedure implements Callable, ForeignExceptionListener {
/**
* Sends a message to Members to create a new {@link Subprocedure} for this Procedure and execute
- * the {@link Subprocedure#acquireBarrier} step. n
+ * the {@link Subprocedure#acquireBarrier} step.
*/
public void sendGlobalBarrierStart() throws ForeignException {
// start the procedure
@@ -255,7 +255,7 @@ public class Procedure implements Callable, ForeignExceptionListener {
* Sends a message to all members that the global barrier condition has been satisfied. This
* should only be executed after all members have completed its
* {@link Subprocedure#acquireBarrier()} call successfully. This triggers the member
- * {@link Subprocedure#insideBarrier} method. n
+ * {@link Subprocedure#insideBarrier} method.
*/
public void sendGlobalBarrierReached() throws ForeignException {
try {
@@ -285,7 +285,7 @@ public class Procedure implements Callable, ForeignExceptionListener {
//
/**
- * Call back triggered by an individual member upon successful local barrier acquisition n
+ * Call back triggered by an individual member upon successful local barrier acquisition
*/
public void barrierAcquiredByMember(String member) {
LOG.debug("member: '" + member + "' joining acquired barrier for procedure '" + procName
@@ -307,7 +307,7 @@ public class Procedure implements Callable, ForeignExceptionListener {
/**
* Call back triggered by a individual member upon successful local in-barrier execution and
- * release nn
+ * release
*/
public void barrierReleasedByMember(String member, byte[] dataFromMember) {
boolean removed = false;
@@ -329,7 +329,7 @@ public class Procedure implements Callable, ForeignExceptionListener {
/**
* Waits until the entire procedure has globally completed, or has been aborted. If an exception
- * is thrown the procedure may or not have run cleanup to trigger the completion latch yet. nn
+ * is thrown the procedure may or may not have run cleanup to trigger the completion latch yet.
*/
public void waitForCompleted() throws ForeignException, InterruptedException {
waitForLatch(completedLatch, monitor, wakeFrequency, procName + " completed");
@@ -338,7 +338,7 @@ public class Procedure implements Callable, ForeignExceptionListener {
/**
* Waits until the entire procedure has globally completed, or has been aborted. If an exception
* is thrown the procedure may or not have run cleanup to trigger the completion latch yet.
- * @return data returned from procedure members upon successfully completing subprocedure. nn
+ * @return data returned from procedure members upon successfully completing subprocedure.
*/
public HashMap waitForCompletedWithRet()
throws ForeignException, InterruptedException {
@@ -347,7 +347,7 @@ public class Procedure implements Callable, ForeignExceptionListener {
}
/**
- * Check if the entire procedure has globally completed, or has been aborted. n
+ * Check if the entire procedure has globally completed, or has been aborted.
*/
public boolean isCompleted() throws ForeignException {
// Rethrow exception if any
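The Procedure methods above coordinate a two-phase barrier: members acquire the barrier and report back, the coordinator announces that the global barrier has been reached, members run their in-barrier work, and the coordinator waits for every release before completing. A standalone CountDownLatch sketch of that ordering, conceptual only (no ZooKeeper, RPC, or error dispatch):

    import java.util.Arrays;
    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    public final class TwoPhaseBarrierSketch {
      public static void main(String[] args) throws InterruptedException {
        List<String> members = Arrays.asList("rs1", "rs2", "rs3");
        CountDownLatch acquired = new CountDownLatch(members.size()); // phase 1: all members acquired
        CountDownLatch reached = new CountDownLatch(1);               // coordinator's "barrier reached" signal
        CountDownLatch released = new CountDownLatch(members.size()); // phase 2: all members finished

        ExecutorService pool = Executors.newFixedThreadPool(members.size());
        for (String member : members) {
          pool.execute(() -> {
            // acquireBarrier(): local preparation, then report back to the coordinator.
            acquired.countDown();
            try {
              reached.await(); // wait for the sendGlobalBarrierReached() equivalent
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt();
              return;
            }
            // insideBarrier(): do the member work, then report completion.
            released.countDown();
          });
        }

        acquired.await();    // every member joined the acquire barrier
        reached.countDown(); // global barrier condition satisfied
        released.await();    // every member finished inside the barrier
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
        System.out.println("procedure completed");
      }

      private TwoPhaseBarrierSketch() {
      }
    }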
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java
index e02776ecb69..2d654691543 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java
@@ -86,16 +86,16 @@ public class ProcedureCoordinator {
}
/**
- * Default thread pool for the procedure n * @param opThreads the maximum number of threads to
- * allow in the pool
+ * Default thread pool for the procedure
+ * @param opThreads the maximum number of threads to allow in the pool
*/
public static ThreadPoolExecutor defaultPool(String coordName, int opThreads) {
return defaultPool(coordName, opThreads, KEEP_ALIVE_MILLIS_DEFAULT);
}
/**
- * Default thread pool for the procedure n * @param opThreads the maximum number of threads to
- * allow in the pool
+ * Default thread pool for the procedure
+ * @param opThreads the maximum number of threads to allow in the pool
* @param keepAliveMillis the maximum time (ms) that excess idle threads will wait for new tasks
*/
public static ThreadPoolExecutor defaultPool(String coordName, int opThreads,
@@ -107,7 +107,7 @@ public class ProcedureCoordinator {
}
/**
- * Shutdown the thread pools and release rpc resources n
+ * Shutdown the thread pools and release rpc resources
*/
public void close() throws IOException {
// have to use shutdown now to break any latch waiting
@@ -221,7 +221,8 @@ public class ProcedureCoordinator {
}
/**
- * Exposed for hooking with unit tests. nnn * @return the newly created procedure
+ * Exposed for hooking with unit tests.
+ * @return the newly created procedure
*/
Procedure createProcedure(ForeignExceptionDispatcher fed, String procName, byte[] procArgs,
List expectedMembers) {
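Both ProcedureCoordinator and ProcedureMember expose a defaultPool(name, threads[, keepAliveMillis]) helper; the essence is a bounded executor whose idle threads expire after keepAliveMillis. A standalone sketch of such a pool; the daemon-thread naming is an assumption, not the exact HBase thread factory:

    import java.util.concurrent.SynchronousQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public final class ProcedurePoolSketch {
      static ThreadPoolExecutor defaultPool(String name, int opThreads, long keepAliveMillis) {
        AtomicInteger counter = new AtomicInteger();
        ThreadPoolExecutor pool = new ThreadPoolExecutor(1, opThreads, keepAliveMillis,
          TimeUnit.MILLISECONDS, new SynchronousQueue<>(), runnable -> {
            Thread t = new Thread(runnable, name + "-proc-pool-" + counter.incrementAndGet());
            t.setDaemon(true);
            return t;
          });
        pool.allowCoreThreadTimeOut(true); // let even the core thread expire when idle
        return pool;
      }

      private ProcedurePoolSketch() {
      }
    }

Because the queue is synchronous, this sketch rejects new tasks once all opThreads are busy rather than queueing them unboundedly.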
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinatorRpcs.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinatorRpcs.java
index e24f1ba1ba7..41354c2c8bc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinatorRpcs.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinatorRpcs.java
@@ -32,8 +32,8 @@ import org.apache.yetus.audience.InterfaceAudience;
public interface ProcedureCoordinatorRpcs extends Closeable {
/**
- * Initialize and start threads necessary to connect an implementation's rpc mechanisms. n
- * * @return true if succeed, false if encountered initialization errors.
+ * Initialize and start threads necessary to connect an implementation's rpc mechanisms.
+ * @return true if succeed, false if encountered initialization errors.
*/
boolean start(final ProcedureCoordinator listener);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMember.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMember.java
index 21a9e741028..d9caf7e5d17 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMember.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMember.java
@@ -66,16 +66,16 @@ public class ProcedureMember implements Closeable {
}
/**
- * Default thread pool for the procedure n * @param procThreads the maximum number of threads to
- * allow in the pool
+ * Default thread pool for the procedure
+ * @param procThreads the maximum number of threads to allow in the pool
*/
public static ThreadPoolExecutor defaultPool(String memberName, int procThreads) {
return defaultPool(memberName, procThreads, KEEP_ALIVE_MILLIS_DEFAULT);
}
/**
- * Default thread pool for the procedure n * @param procThreads the maximum number of threads to
- * allow in the pool
+ * Default thread pool for the procedure
+ * @param procThreads the maximum number of threads to allow in the pool
* @param keepAliveMillis the maximum time (ms) that excess idle threads will wait for new tasks
*/
public static ThreadPoolExecutor defaultPool(String memberName, int procThreads,
@@ -97,7 +97,7 @@ public class ProcedureMember implements Closeable {
/**
* This is separated from execution so that we can detect and handle the case where the
* subprocedure is invalid and inactionable due to bad info (like DISABLED snapshot type being
- * sent here) nnn
+ * sent here)
*/
public Subprocedure createSubprocedure(String opName, byte[] data) {
return builder.buildSubprocedure(opName, data);
@@ -189,7 +189,7 @@ public class ProcedureMember implements Closeable {
/**
* Shutdown the threadpool, and wait for upto timeoutMs millis before bailing
* @param timeoutMs timeout limit in millis
- * @return true if successfully, false if bailed due to timeout. n
+ * @return true if shut down successfully, false if bailed due to timeout.
*/
boolean closeAndWait(long timeoutMs) throws InterruptedException {
pool.shutdown();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java
index f0b534d49ca..c298185d9e0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java
@@ -31,7 +31,7 @@ import org.apache.zookeeper.KeeperException;
public abstract class RegionServerProcedureManager extends ProcedureManager {
/**
* Initialize a globally barriered procedure for region servers.
- * @param rss Region Server service interface n
+ * @param rss Region Server service interface
*/
public abstract void initialize(RegionServerServices rss) throws KeeperException;
@@ -42,7 +42,7 @@ public abstract class RegionServerProcedureManager extends ProcedureManager {
/**
* Close this and all running procedure tasks
- * @param force forcefully stop all running tasks n
+ * @param force forcefully stop all running tasks
*/
public abstract void stop(boolean force) throws IOException;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java
index 17b465fd2cb..ad10b0e91db 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java
@@ -230,7 +230,7 @@ abstract public class Subprocedure implements Callable {
* etc) to satisfy the Procedures barrier condition. For example, this would be where to make all
* the regions on a RS on the quiescent for an procedure that required all regions to be globally
* quiesed. Users should override this method. If a quiescent is not required, this is overkill
- * but can still be used to execute a procedure on all members and to propagate any exceptions. n
+ * but can still be used to execute a procedure on all members and to propagate any exceptions.
*/
abstract public void acquireBarrier() throws ForeignException;
@@ -240,19 +240,19 @@ abstract public class Subprocedure implements Callable {
* have been quiesced, and procedures that require this precondition could be implemented here.
* The implementation should also collect the result of the subprocedure as data to be returned to
* the coordinator upon successful completion. Users should override this method.
- * @return the data the subprocedure wants to return to coordinator side. n
+ * @return the data the subprocedure wants to return to coordinator side.
*/
abstract public byte[] insideBarrier() throws ForeignException;
/**
* Users should override this method. This implementation of this method should rollback and
* cleanup any temporary or partially completed state that the {@link #acquireBarrier()} may have
- * created. n
+ * created.
*/
abstract public void cleanup(Exception e);
/**
- * Method to cancel the Subprocedure by injecting an exception from and external source. n
+ * Method to cancel the Subprocedure by injecting an exception from an external source.
*/
public void cancel(String msg, Throwable cause) {
LOG.error(msg, cause);
@@ -278,7 +278,7 @@ abstract public class Subprocedure implements Callable {
//
/**
- * Wait for the reached global barrier notification. Package visibility for testing nn
+ * Wait for the reached global barrier notification. Package visibility for testing
*/
void waitForReachedGlobalBarrier() throws ForeignException, InterruptedException {
Procedure.waitForLatch(inGlobalBarrier, monitor, wakeFrequency,
@@ -286,7 +286,7 @@ abstract public class Subprocedure implements Callable {
}
/**
- * Waits until the entire procedure has globally completed, or has been aborted. nn
+ * Waits until the entire procedure has globally completed, or has been aborted.
*/
public void waitForLocallyCompleted() throws ForeignException, InterruptedException {
Procedure.waitForLatch(releasedLocalBarrier, monitor, wakeFrequency,
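A Subprocedure's member-side contract is the three overrides documented above: acquireBarrier() prepares local state, insideBarrier() runs once the global barrier is reached and returns data for the coordinator, and cleanup() rolls back whatever acquireBarrier() created on failure. A lifecycle sketch shaped after that contract; it deliberately does not extend the real Subprocedure base class, whose constructor and error dispatcher are omitted here:

    import java.nio.charset.StandardCharsets;

    /** Lifecycle sketch only; a real member implementation extends Subprocedure. */
    public final class FlushSubprocedureSketch {
      private boolean prepared;

      /** Phase 1: quiesce or otherwise prepare local state, then report readiness. */
      public void acquireBarrier() {
        prepared = true;
      }

      /** Phase 2: runs after the coordinator signals that the global barrier was reached. */
      public byte[] insideBarrier() {
        if (!prepared) {
          throw new IllegalStateException("insideBarrier() called before acquireBarrier()");
        }
        // Do the member work here (e.g. flush the local regions) and return a result payload.
        return "ok".getBytes(StandardCharsets.UTF_8);
      }

      /** Roll back anything acquireBarrier() set up if the procedure fails. */
      public void cleanup(Exception cause) {
        prepared = false;
      }
    }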
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java
index 1dff3ced994..c3ad2e5cf7f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java
@@ -218,7 +218,7 @@ public abstract class ZKProcedureUtil extends ZKListener implements Closeable {
// --------------------------------------------------------------------------
/**
* Recursively print the current state of ZK (non-transactional)
- * @param root name of the root directory in zk to print n
+ * @param root name of the root directory in zk to print
*/
void logZKTree(String root) {
if (!LOG.isDebugEnabled()) return;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java
index 7771f4e6e63..3322f7a5cd5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java
@@ -109,7 +109,7 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur
/**
* Close this and all running tasks
- * @param force forcefully stop all running tasks n
+ * @param force forcefully stop all running tasks
*/
@Override
public void stop(boolean force) throws IOException {
@@ -126,8 +126,8 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur
/**
* If in a running state, creates the specified subprocedure to flush table regions. Because this
* gets the local list of regions to flush and not the set the master had, there is a possibility
- * of a race where regions may be missed. nn * @return Subprocedure to submit to the
- * ProcedureMember.
+ * of a race where regions may be missed.
+ * @return Subprocedure to submit to the ProcedureMember.
*/
public Subprocedure buildSubprocedure(String table, String family) {
@@ -164,8 +164,8 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur
/**
* Get the list of regions to flush for the table on this server It is possible that if a region
- * moves somewhere between the calls we'll miss the region. n * @return the list of online
- * regions. Empty list is returned if no regions. n
+ * moves somewhere between the calls we'll miss the region.
+ * @return the list of online regions. Empty list is returned if no regions.
*/
private List getRegionsToFlush(String table) throws IOException {
return (List) rss.getRegions(TableName.valueOf(table));
@@ -235,7 +235,7 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur
/**
* Wait for all of the currently outstanding tasks submitted via {@link #submitTask(Callable)}.
* This *must* be called after all tasks are submitted via submitTask.
- * @return true on success, false otherwise n
+ * @return true on success, false otherwise
*/
boolean waitForOutstandingTasks() throws ForeignException, InterruptedException {
LOG.debug("Waiting for local region flush to finish.");
@@ -279,7 +279,7 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur
/**
* This attempts to cancel out all pending and in progress tasks. Does not interrupt the running
- * tasks itself. An ongoing HRegion.flush() should not be interrupted (see HBASE-13877). n
+ * tasks itself. An ongoing HRegion.flush() should not be interrupted (see HBASE-13877).
*/
void cancelTasks() throws InterruptedException {
Collection> tasks = futures;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtobufUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtobufUtil.java
index cfdf0e12c85..6754fdef08a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtobufUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtobufUtil.java
@@ -129,9 +129,7 @@ public class ReplicationProtobufUtil {
return new Pair<>(builder.build(), getCellScanner(allCells, size));
}
- /**
- * n * @return cells packaged as a CellScanner
- */
+ /** Returns cells packaged as a CellScanner */
static CellScanner getCellScanner(final List> cells, final int size) {
return new SizedCellScanner() {
private final Iterator> entries = cells.iterator();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
index 179c617462f..5cd3a92e5b6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
@@ -282,7 +282,7 @@ public abstract class AbstractMemStore implements MemStore {
}
/*
- * nn * @return Return lowest of a or b or null if both a and b are null
+ * @return The lowest of a or b, or null if both a and b are null
*/
protected Cell getLowest(final Cell a, final Cell b) {
if (a == null) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSink.java
index e8878a04b47..c7587a147a6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSink.java
@@ -31,7 +31,7 @@ import org.apache.yetus.audience.InterfaceAudience;
public interface CellSink {
/**
* Append the given cell
- * @param cell the cell to be added n
+ * @param cell the cell to be added
*/
void append(Cell cell) throws IOException;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
index ba9cd9d13ec..cb6795d464b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
@@ -385,7 +385,7 @@ public class ChunkCreator {
/**
* Add the chunks to the pool, when the pool achieves the max size, it will skip the remaining
- * chunks n
+ * chunks
*/
private void putbackChunks(Chunk c) {
int toAdd = this.maxCount - reclaimedChunks.size();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
index 9754bbee7bb..1a2cbc6bdab 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
@@ -543,7 +543,7 @@ public class CompactingMemStore extends AbstractMemStore {
* cells to currActive.cellSet,so for
* {@link CompactingMemStore#flushInMemory(MutableSegment)},checkEmpty parameter is false. But if
* {@link CompactingMemStore#snapshot} called this method,because there is no pending
- * write,checkEmpty parameter could be true. nn
+ * write,checkEmpty parameter could be true.
*/
protected void pushActiveToPipeline(MutableSegment currActive, boolean checkEmpty) {
if (!checkEmpty || !currActive.isEmpty()) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FavoredNodesForRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FavoredNodesForRegion.java
index 27fb3761de3..f6fc2250eff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FavoredNodesForRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FavoredNodesForRegion.java
@@ -30,14 +30,14 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName;
@InterfaceAudience.Private
public interface FavoredNodesForRegion {
/**
- * Used to update the favored nodes mapping when required. nn
+ * Used to update the favored nodes mapping when required.
*/
void updateRegionFavoredNodesMapping(String encodedRegionName, List favoredNodes);
/**
* Get the favored nodes mapping for this region. Used when the HDFS create API is invoked to pass
- * in favored nodes hints for new region files. n * @return array containing the favored nodes'
- * InetSocketAddresses
+ * in favored nodes hints for new region files.
+ * @return array containing the favored nodes' InetSocketAddresses
*/
InetSocketAddress[] getFavoredNodesForRegion(String encodedRegionName);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java
index b263a6d3f59..f8e68ce8629 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java
@@ -49,18 +49,18 @@ public interface FlushRequester {
boolean requestDelayedFlush(HRegion region, long delay);
/**
- * Register a FlushRequestListener n
+ * Register a FlushRequestListener
*/
void registerFlushRequestListener(final FlushRequestListener listener);
/**
- * Unregister the given FlushRequestListener n * @return true when passed listener is unregistered
- * successfully.
+ * Unregister the given FlushRequestListener
+ * @return true when passed listener is unregistered successfully.
*/
public boolean unregisterFlushRequestListener(final FlushRequestListener listener);
/**
- * Sets the global memstore limit to a new size. n
+ * Sets the global memstore limit to a new size.
*/
public void setGlobalMemStoreLimit(long globalMemStoreSize);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
index b5396110db2..6a07a6c9a08 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
@@ -176,7 +176,7 @@ public class HMobStore extends HStore {
* @param compression The compression algorithm.
* @param startKey The start key.
* @param isCompaction If the writer is used in compaction.
- * @return The writer for the mob file. n
+ * @return The writer for the mob file.
*/
public StoreFileWriter createWriterInTmp(Date date, long maxKeyCount,
Compression.Algorithm compression, byte[] startKey, boolean isCompaction) throws IOException {
@@ -195,7 +195,7 @@ public class HMobStore extends HStore {
* @param compression The compression algorithm.
* @param startKey The start key.
* @param isCompaction If the writer is used in compaction.
- * @return The writer for the mob file. n
+ * @return The writer for the mob file.
*/
public StoreFileWriter createWriter(Date date, long maxKeyCount,
Compression.Algorithm compression, byte[] startKey, boolean isCompaction,
@@ -216,7 +216,7 @@ public class HMobStore extends HStore {
* @param compression The compression algorithm.
* @param startKey The start key.
* @param isCompaction If the writer is used in compaction.
- * @return The writer for the mob file. n
+ * @return The writer for the mob file.
*/
public StoreFileWriter createWriterInTmp(String date, Path basePath, long maxKeyCount,
Compression.Algorithm compression, byte[] startKey, boolean isCompaction,
@@ -235,7 +235,7 @@ public class HMobStore extends HStore {
* @param maxKeyCount The key count.
* @param compression The compression algorithm.
* @param isCompaction If the writer is used in compaction.
- * @return The writer for the mob file. n
+ * @return The writer for the mob file.
*/
public StoreFileWriter createWriterInTmp(MobFileName mobFileName, Path basePath, long maxKeyCount,
@@ -251,7 +251,7 @@ public class HMobStore extends HStore {
/**
* Commits the mob file.
* @param sourceFile The source file.
- * @param targetPath The directory path where the source file is renamed to. n
+ * @param targetPath The directory path where the source file is renamed to.
*/
public void commitFile(final Path sourceFile, Path targetPath) throws IOException {
if (sourceFile == null) {
@@ -298,7 +298,7 @@ public class HMobStore extends HStore {
* DefaultMobStoreCompactor where we can read empty value for the missing cell.
* @param reference The cell found in the HBase, its value is a path to a mob file.
* @param cacheBlocks Whether the scanner should cache blocks.
- * @return The cell found in the mob file. n
+ * @return The cell found in the mob file.
*/
public MobCell resolve(Cell reference, boolean cacheBlocks) throws IOException {
return resolve(reference, cacheBlocks, -1, true);
@@ -311,7 +311,7 @@ public class HMobStore extends HStore {
* @param cacheBlocks Whether the scanner should cache blocks.
* @param readEmptyValueOnMobCellMiss should return empty mob cell if reference can not be
* resolved.
- * @return The cell found in the mob file. n
+ * @return The cell found in the mob file.
*/
public MobCell resolve(Cell reference, boolean cacheBlocks, boolean readEmptyValueOnMobCellMiss)
throws IOException {
@@ -326,7 +326,7 @@ public class HMobStore extends HStore {
* @param readPt the read point.
* @param readEmptyValueOnMobCellMiss Whether return null value when the mob file is missing or
* corrupt.
- * @return The cell found in the mob file. n
+ * @return The cell found in the mob file.
*/
public MobCell resolve(Cell reference, boolean cacheBlocks, long readPt,
boolean readEmptyValueOnMobCellMiss) throws IOException {
@@ -395,7 +395,7 @@ public class HMobStore extends HStore {
* @param readPt the read point.
* @param readEmptyValueOnMobCellMiss Whether return null value when the mob file is missing or
* corrupt.
- * @return The found cell. Null if there's no such a cell. n
+ * @return The found cell. Null if there's no such cell.
*/
private MobCell readCell(List locations, String fileName, Cell search,
boolean cacheMobBlocks, long readPt, boolean readEmptyValueOnMobCellMiss) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 5ec6984d1ad..83fe1bf67d2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1115,7 +1115,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
}
/**
- * Open all Stores. nn * @return Highest sequenceId found out in a Store. n
+ * Open all Stores.
+ * @return Highest sequenceId found out in a Store.
*/
private long initializeStores(CancelableProgressable reporter, MonitoredTask status)
throws IOException {
@@ -1292,7 +1293,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
* @param tableDescriptor TableDescriptor of the table
* @param regionInfo encoded name of the region
* @param tablePath the table directory
- * @return The HDFS blocks distribution for the given region. n
+ * @return The HDFS blocks distribution for the given region.
*/
public static HDFSBlocksDistribution computeHDFSBlocksDistribution(Configuration conf,
TableDescriptor tableDescriptor, RegionInfo regionInfo, Path tablePath) throws IOException {
@@ -2159,7 +2160,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
// upkeep.
//////////////////////////////////////////////////////////////////////////////
/**
- * Do preparation for pending compaction. n
+ * Do preparation for pending compaction.
*/
protected void doRegionCompactionPrep() throws IOException {
}
@@ -2173,7 +2174,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
* activities. The regionserver does not normally compact and split in parallel. However by
* calling this method you may introduce unexpected and unhandled concurrency. Don't do this
* unless you know what you are doing.
- * @param majorCompaction True to force a major compaction regardless of thresholds n
+ * @param majorCompaction True to force a major compaction regardless of thresholds
*/
public void compact(boolean majorCompaction) throws IOException {
if (majorCompaction) {
@@ -2228,8 +2229,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
* that no locking is necessary at this level because compaction only conflicts with a region
* split, and that cannot happen because the region server does them sequentially and not in
* parallel.
- * @param compaction Compaction details, obtained by requestCompaction() n * @return whether the
- * compaction completed
+ * @param compaction Compaction details, obtained by requestCompaction()
+ * @return whether the compaction completed
*/
public boolean compact(CompactionContext compaction, HStore store,
ThroughputController throughputController) throws IOException {
@@ -3065,7 +3066,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
/**
* Method to safely get the next sequence number.
- * @return Next sequence number unassociated with any actual edit. n
+ * @return Next sequence number unassociated with any actual edit.
*/
protected long getNextSequenceId(final WAL wal) throws IOException {
WriteEntry we = mvcc.begin();
@@ -5137,7 +5138,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
/**
* Replace any cell timestamps set to {@link org.apache.hadoop.hbase.HConstants#LATEST_TIMESTAMP}
- * provided current timestamp. nn
+ * with the provided current timestamp.
*/
private static void updateCellTimestamps(final Iterable> cellItr, final byte[] now)
throws IOException {
@@ -5278,7 +5279,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
}
/**
- * Check the collection of families for valid timestamps n * @param now current timestamp n
+ * Check the collection of families for valid timestamps
+ * @param now current timestamp
*/
public void checkTimestamps(final Map> familyMap, long now)
throws FailedSanityCheckException {
@@ -5304,7 +5306,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
}
/*
- * n * @return True if size is over the flush threshold
+ * @return True if size is over the flush threshold
*/
private boolean isFlushSize(MemStoreSize size) {
return size.getHeapSize() + size.getOffHeapSize() > getMemStoreFlushSize();
@@ -6809,7 +6811,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
* make sure have been through lease recovery before get file status, so the file length can be
* trusted.
* @param p File to check.
- * @return True if file was zero-length (and if so, we'll delete it in here). n
+ * @return True if file was zero-length (and if so, we'll delete it in here).
*/
private static boolean isZeroLengthThenDelete(final FileSystem fs, final FileStatus stat,
final Path p) throws IOException {
@@ -6894,7 +6896,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
/**
* Get an exclusive ( write lock ) lock on a given row.
* @param row Which row to lock.
- * @return A locked RowLock. The lock is exclusive and already aqquired. n
+ * @return A locked RowLock. The lock is exclusive and already acquired.
*/
public RowLock getRowLock(byte[] row) throws IOException {
return getRowLock(row, false);
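A minimal sketch of holding and releasing such a lock, assuming the caller already has the region and the row key; RowLock.release() is the counterpart exposed on the public Region interface:

  // Acquire the exclusive lock, do the row-level work, and always release it.
  Region.RowLock lock = region.getRowLock(row);
  try {
    // read or mutate the row while holding the exclusive lock
  } finally {
    lock.release();
  }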
@@ -7130,8 +7132,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
* column families atomically.
* @param familyPaths List of Pair<byte[] column family, String hfilePath>
* @param bulkLoadListener Internal hooks enabling massaging/preparation of a file about to be
- * bulk loaded n * @return Map from family to List of store file paths if
- * successful, null if failed recoverably
+ * bulk loaded
+ * @return Map from family to List of store file paths if successful, null if failed recoverably
* @throws IOException if failed unrecoverably.
*/
public Map> bulkLoadHFiles(Collection> familyPaths,
@@ -7148,7 +7150,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
* Called before an HFile is actually loaded
* @param family family being loaded to
* @param srcPath path of HFile
- * @return final path to be used for actual loading n
+ * @return final path to be used for actual loading
*/
String prepareBulkLoad(byte[] family, String srcPath, boolean copyFile, String customStaging)
throws IOException;
@@ -7156,14 +7158,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
/**
* Called after a successful HFile load
* @param family family being loaded to
- * @param srcPath path of HFile n
+ * @param srcPath path of HFile
*/
void doneBulkLoad(byte[] family, String srcPath) throws IOException;
/**
* Called after a failed HFile load
* @param family family being loaded to
- * @param srcPath path of HFile n
+ * @param srcPath path of HFile
*/
void failedBulkLoad(byte[] family, String srcPath) throws IOException;
}
@@ -7171,11 +7173,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
/**
* Attempts to atomically load a group of hfiles. This is critical for loading rows with multiple
* column families atomically.
- * @param familyPaths List of Pair<byte[] column family, String hfilePath> n * @param
- * bulkLoadListener Internal hooks enabling massaging/preparation of a file
- * about to be bulk loaded
- * @param copyFile always copy hfiles if true
- * @param clusterIds ids from clusters that had already handled the given bulkload event.
+ * @param familyPaths List of Pair<byte[] column family, String hfilePath>
+ * @param bulkLoadListener Internal hooks enabling massaging/preparation of a file about to be
+ * bulk loaded
+ * @param copyFile always copy hfiles if true
+ * @param clusterIds ids from clusters that had already handled the given bulkload event.
* @return Map from family to List of store file paths if successful, null if failed recoverably
* @throws IOException if failed unrecoverably.
*/
@@ -7523,7 +7525,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
* @param wal WAL for region to use. This method will call WAL#setSequenceNumber(long) passing
* the result of the call to HRegion#getMinSequenceId() to ensure the wal id is
* properly kept up. HRegionStore does this every time it opens a new region.
- * @return new HRegion n
+ * @return new HRegion
*/
public static HRegion openHRegion(final RegionInfo info, final TableDescriptor htd, final WAL wal,
final Configuration conf) throws IOException {
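A hedged sketch of this overload; the RegionInfo, TableDescriptor, WAL and Configuration are assumed to be constructed by the caller, and the region is closed when done:

  HRegion region = HRegion.openHRegion(info, htd, wal, conf);
  try {
    // read from or write to the opened region
  } finally {
    region.close();  // close stores and release the region's resources
  }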
@@ -7541,7 +7543,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
* @param conf The Configuration object to use.
* @param rsServices An interface we can request flushes against.
* @param reporter An interface we can report progress against.
- * @return new HRegion n
+ * @return new HRegion
*/
public static HRegion openHRegion(final RegionInfo info, final TableDescriptor htd, final WAL wal,
final Configuration conf, final RegionServerServices rsServices,
@@ -7558,7 +7560,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
* the result of the call to HRegion#getMinSequenceId() to ensure the wal id is
* properly kept up. HRegionStore does this every time it opens a new region.
* @param conf The Configuration object to use.
- * @return new HRegion n
+ * @return new HRegion
*/
public static HRegion openHRegion(Path rootDir, final RegionInfo info, final TableDescriptor htd,
final WAL wal, final Configuration conf) throws IOException {
@@ -7577,7 +7579,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
* @param conf The Configuration object to use.
* @param rsServices An interface we can request flushes against.
* @param reporter An interface we can report progress against.
- * @return new HRegion n
+ * @return new HRegion
*/
public static HRegion openHRegion(final Path rootDir, final RegionInfo info,
final TableDescriptor htd, final WAL wal, final Configuration conf,
@@ -7923,7 +7925,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
* @param nonceGroup Optional nonce group of the operation (client Id)
* @param nonce Optional nonce of the operation (unique random id to ensure "more
* idempotence") If multiple rows are locked care should be taken that
- * rowsToLock is sorted in order to avoid deadlocks. n
+ * rowsToLock is sorted in order to avoid deadlocks.
*/
@Override
public void mutateRowsWithLocks(Collection mutations, Collection rowsToLock,
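A sketch of the deadlock-avoidance advice above: the caller sorts rowsToLock before invoking the method. The mutations, the source row collection and the region reference are assumed; the NO_NONCE constants come from HConstants:

  // Lock rows in a consistent order so concurrent callers cannot deadlock.
  List<byte[]> rowsToLock = new ArrayList<>(rows);
  rowsToLock.sort(Bytes.BYTES_COMPARATOR);
  region.mutateRowsWithLocks(mutations, rowsToLock, HConstants.NO_NONCE, HConstants.NO_NONCE);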
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
index 9ac55759d9b..48afdc59f86 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
@@ -360,7 +360,7 @@ public class HRegionFileSystem {
/**
* Returns true if the specified family has reference files
* @param familyName Column Family Name
- * @return true if family contains reference files n
+ * @return true if family contains reference files
*/
public boolean hasReferences(final String familyName) throws IOException {
Path storeDir = getStoreDir(familyName);
@@ -382,7 +382,7 @@ public class HRegionFileSystem {
/**
* Check whether region has Reference file
* @param htd table descriptor of the region
- * @return true if region has reference file n
+ * @return true if region has reference file
*/
public boolean hasReferences(final TableDescriptor htd) throws IOException {
for (ColumnFamilyDescriptor family : htd.getColumnFamilies()) {
@@ -465,7 +465,7 @@ public class HRegionFileSystem {
* Move the file from a build/temp location to the main family store directory.
* @param familyName Family that will gain the file
* @param buildPath {@link Path} to the file to commit.
- * @return The new {@link Path} of the committed file n
+ * @return The new {@link Path} of the committed file
*/
public Path commitStoreFile(final String familyName, final Path buildPath) throws IOException {
Path dstPath = preCommitStoreFile(familyName, buildPath, -1, false);
@@ -480,7 +480,7 @@ public class HRegionFileSystem {
* @param seqNum Sequence Number to append to the file name (less than 0 if no sequence
* number)
* @param generateNewName False if you want to keep the buildPath name
- * @return The new {@link Path} of the to be committed file n
+ * @return The new {@link Path} of the to be committed file
*/
private Path preCommitStoreFile(final String familyName, final Path buildPath, final long seqNum,
final boolean generateNewName) throws IOException {
@@ -506,7 +506,7 @@ public class HRegionFileSystem {
* Moves file from staging dir to region dir
* @param buildPath {@link Path} to the file to commit.
* @param dstPath {@link Path} to the file under region dir
- * @return The {@link Path} of the committed file n
+ * @return The {@link Path} of the committed file
*/
Path commitStoreFile(final Path buildPath, Path dstPath) throws IOException {
// rename is not necessary in case of direct-insert stores
@@ -550,7 +550,7 @@ public class HRegionFileSystem {
* @param familyName Family that will gain the file
* @param srcPath {@link Path} to the file to import
* @param seqNum Bulk Load sequence number
- * @return The destination {@link Path} of the bulk loaded file n
+ * @return The destination {@link Path} of the bulk loaded file
*/
Pair bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
throws IOException {
@@ -585,7 +585,7 @@ public class HRegionFileSystem {
/**
* Remove daughter region
- * @param regionInfo daughter {@link RegionInfo} n
+ * @param regionInfo daughter {@link RegionInfo}
*/
void cleanupDaughterRegion(final RegionInfo regionInfo) throws IOException {
Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
@@ -771,7 +771,7 @@ public class HRegionFileSystem {
/**
* Remove merged region
- * @param mergedRegion {@link RegionInfo} n
+ * @param mergedRegion {@link RegionInfo}
*/
public void cleanupMergedRegion(final RegionInfo mergedRegion) throws IOException {
Path regionDir = new Path(this.tableDir, mergedRegion.getEncodedName());
@@ -816,7 +816,7 @@ public class HRegionFileSystem {
}
/**
- * Commit a merged region, making it ready for use. n
+ * Commit a merged region, making it ready for use.
*/
public void commitMergedRegion(List allMergedFiles, MasterProcedureEnv env)
throws IOException {
@@ -834,9 +834,7 @@ public class HRegionFileSystem {
// Create/Open/Delete Helpers
// ===========================================================================
- /**
- * n * @return Content of the file we write out to the filesystem under a region n
- */
+ /** Returns Content of the file we write out to the filesystem under a region */
private static byte[] getRegionInfoFileContent(final RegionInfo hri) throws IOException {
return RegionInfo.toDelimitedByteArray(hri);
}
@@ -1070,9 +1068,9 @@ public class HRegionFileSystem {
}
/**
- * Creates a directory. Assumes the user has already checked for this directory existence. n
- * * @return the result of fs.mkdirs(). In case underlying fs throws an IOException, it checks
- * whether the directory exists or not, and returns true if it exists. n
+ * Creates a directory. Assumes the user has already checked for this directory existence.
+ * @return the result of fs.mkdirs(). In case underlying fs throws an IOException, it checks
+ * whether the directory exists or not, and returns true if it exists.
*/
boolean createDir(Path dir) throws IOException {
int i = 0;
@@ -1094,8 +1092,8 @@ public class HRegionFileSystem {
}
/**
- * Renames a directory. Assumes the user has already checked for this directory existence. nn
- * * @return true if rename is successful. n
+ * Renames a directory. Assumes the user has already checked for this directory existence.
+ * @return true if rename is successful.
*/
boolean rename(Path srcpath, Path dstPath) throws IOException {
IOException lastIOE = null;
@@ -1119,8 +1117,8 @@ public class HRegionFileSystem {
}
/**
- * Deletes a directory. Assumes the user has already checked for this directory existence. n
- * * @return true if the directory is deleted. n
+ * Deletes a directory. Assumes the user has already checked for this directory existence.
+ * @return true if the directory is deleted.
*/
boolean deleteDir(Path dir) throws IOException {
IOException lastIOE = null;
@@ -1152,9 +1150,9 @@ public class HRegionFileSystem {
/**
* Creates a directory for a filesystem and configuration object. Assumes the user has already
- * checked for this directory existence. nnn * @return the result of fs.mkdirs(). In case
- * underlying fs throws an IOException, it checks whether the directory exists or not, and returns
- * true if it exists. n
+ * checked for this directory existence.
+ * @return the result of fs.mkdirs(). In case underlying fs throws an IOException, it checks
+ * whether the directory exists or not, and returns true if it exists.
*/
private static boolean createDirOnFileSystem(FileSystem fs, Configuration conf, Path dir)
throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index c18b7e73cdf..7c166df74af 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -2292,8 +2292,8 @@ public class HRegionServer extends HBaseServerBase
/**
* Cause the server to exit without closing the regions it is serving, the log it is using and
* without notifying the master. Used unit testing and on catastrophic events such as HDFS is
- * yanked out from under hbase or we OOME. n * the reason we are aborting n * the exception that
- * caused the abort, or null
+ * yanked out from under hbase or we OOME.
+ * @param reason the reason we are aborting
+ * @param cause the exception that caused the abort, or null
*/
@Override
public void abort(String reason, Throwable cause) {
@@ -2504,7 +2504,7 @@ public class HRegionServer extends HBaseServerBase
/*
* Let the master know we're here. Run initialization using parameters passed us by the master.
* @return A Map of key/value configurations we got from the Master else null if we failed to
- * register. n
+ * register.
*/
private RegionServerStartupResponse reportForDuty() throws IOException {
if (this.masterless) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
index ec0b9aafc4c..b4f5ad08236 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
@@ -210,7 +210,7 @@ public class HStoreFile implements StoreFile {
* actually present in the HFile, because column family configuration might
* change. If this is {@link BloomType#NONE}, the existing Bloom filter is
* ignored.
- * @param primaryReplica true if this is a store file for primary replica, otherwise false. n
+ * @param primaryReplica true if this is a store file for primary replica, otherwise false.
*/
public HStoreFile(FileSystem fs, Path p, Configuration conf, CacheConfig cacheConf,
BloomType cfBloomType, boolean primaryReplica) throws IOException {
@@ -567,7 +567,7 @@ public class HStoreFile implements StoreFile {
}
/**
- * @param evictOnClose whether to evict blocks belonging to this file n
+ * @param evictOnClose whether to evict blocks belonging to this file
*/
public synchronized void closeStoreFile(boolean evictOnClose) throws IOException {
if (this.initialReader != null) {
@@ -577,7 +577,7 @@ public class HStoreFile implements StoreFile {
}
/**
- * Delete this file n
+ * Delete this file
*/
public void deleteStoreFile() throws IOException {
boolean evictOnClose = cacheConf != null ? cacheConf.shouldEvictOnClose() : true;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryTuner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryTuner.java
index cd6281729c7..563adecd1ae 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryTuner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryTuner.java
@@ -32,8 +32,8 @@ import org.apache.yetus.audience.InterfaceStability;
public interface HeapMemoryTuner extends Configurable {
/**
- * Perform the heap memory tuning operation. n * @return TunerResult including the
- * heap percentage for memstore and block cache
+ * Perform the heap memory tuning operation.
+ * @return TunerResult including the heap percentage for memstore and block cache
*/
TunerResult tune(TunerContext context);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScan.java
index 045407c7cf1..795abd107e1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScan.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScan.java
@@ -46,7 +46,7 @@ public class InternalScan extends Scan {
}
/**
- * @param scan - original scan object n
+ * @param scan - original scan object
*/
public InternalScan(Scan scan) throws IOException {
super(scan);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java
index c6ff029c5e6..47ff5c38d39 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java
@@ -51,14 +51,14 @@ public interface InternalScanner extends Closeable {
/**
* Grab the next row's worth of values.
- * @param result return output array n * @return true if more rows exist after this one, false if
- * scanner is done
+ * @param result return output array
+ * @return true if more rows exist after this one, false if scanner is done
* @throws IOException e
*/
boolean next(List result, ScannerContext scannerContext) throws IOException;
/**
- * Closes the scanner and releases any resources it has allocated n
+ * Closes the scanner and releases any resources it has allocated
*/
@Override
void close() throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
index 7041e6c7914..1fe80bc58b0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
@@ -65,7 +65,7 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner
protected KVScannerComparator comparator;
/**
- * Constructor. This KeyValueHeap will handle closing of passed in KeyValueScanners. nn
+ * Constructor. This KeyValueHeap will handle closing of passed in KeyValueScanners.
*/
public KeyValueHeap(List extends KeyValueScanner> scanners, CellComparator comparator)
throws IOException {
@@ -73,7 +73,7 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner
}
/**
- * Constructor. nnn
+ * Constructor.
*/
KeyValueHeap(List extends KeyValueScanner> scanners, KVScannerComparator comparator)
throws IOException {
@@ -170,7 +170,7 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner
protected CellComparator kvComparator;
/**
- * Constructor n
+ * Constructor
*/
public KVScannerComparator(CellComparator kvComparator) {
this.kvComparator = kvComparator;
@@ -190,15 +190,15 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner
}
/**
- * Compares two KeyValue nn * @return less than 0 if left is smaller, 0 if equal etc..
+ * Compares two KeyValues
+ * @return less than 0 if left is smaller, 0 if equal, etc.
*/
public int compare(Cell left, Cell right) {
return this.kvComparator.compare(left, right);
}
/**
- * n
- */
+ * Returns the comparator. */
public CellComparator getComparator() {
return this.kvComparator;
}
@@ -232,7 +232,7 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner
* This function (and {@link #reseek(Cell)}) does not do multi-column Bloom filter and lazy-seek
* optimizations. To enable those, call {@link #requestSeek(Cell, boolean, boolean)}.
* @param seekKey KeyValue to seek at or after
- * @return true if KeyValues exist at or after specified key, false if not n
+ * @return true if KeyValues exist at or after specified key, false if not
*/
@Override
public boolean seek(Cell seekKey) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java
index ac2cb7aef12..d90cf78dda5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java
@@ -152,7 +152,7 @@ public interface KeyValueScanner extends Shipper, Closeable {
/**
* Seek the scanner at the first KeyValue of last row
- * @return true if scanner has values left, false if the underlying data is empty n
+ * @return true if scanner has values left, false if the underlying data is empty
*/
public boolean seekToLastRow() throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java
index 164615c089e..947944baf91 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java
@@ -41,7 +41,8 @@ public interface MemStore {
MemStoreSnapshot snapshot();
/**
- * Clears the current snapshot of the Memstore. nn * @see #snapshot()
+ * Clears the current snapshot of the Memstore.
+ * @see #snapshot()
*/
void clearSnapshot(long id) throws UnexpectedStateException;
@@ -59,14 +60,16 @@ public interface MemStore {
MemStoreSize getSnapshotSize();
/**
- * Write an update n * @param memstoreSizing The delta in memstore size will be passed back via
- * this. This will include both data size and heap overhead delta.
+ * Write an update
+ * @param memstoreSizing The delta in memstore size will be passed back via this. This will
+ * include both data size and heap overhead delta.
*/
void add(final Cell cell, MemStoreSizing memstoreSizing);
/**
- * Write the updates n * @param memstoreSizing The delta in memstore size will be passed back via
- * this. This will include both data size and heap overhead delta.
+ * Write the updates
+ * @param memstoreSizing The delta in memstore size will be passed back via this. This will
+ * include both data size and heap overhead delta.
*/
void add(Iterable cells, MemStoreSizing memstoreSizing);
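A small sketch of the size-accounting pattern these add() methods describe; the memstore and cell are assumed to exist, and NonThreadSafeMemStoreSizing is used here only as one available MemStoreSizing implementation:

  // Pass a MemStoreSizing and read the data/heap deltas back after the write.
  MemStoreSizing sizing = new NonThreadSafeMemStoreSizing();
  memstore.add(cell, sizing);
  long dataDelta = sizing.getDataSize();  // bytes of cell data added by this write
  long heapDelta = sizing.getHeapSize();  // heap overhead added by this write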
@@ -83,8 +86,8 @@ public interface MemStore {
* visible. May want to change this so it is atomic across all KeyValues.
*
* This is called under row lock, so Get operations will still see updates atomically. Scans will
- * only see each KeyValue update as atomic. n * @param readpoint readpoint below which we can
- * safely remove duplicate Cells.
+ * only see each KeyValue update as atomic.
+ * @param readpoint readpoint below which we can safely remove duplicate Cells.
* @param memstoreSizing The delta in memstore size will be passed back via this. This will
* include both data size and heap overhead delta.
*/
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index d976917c656..44b19192542 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -110,8 +110,7 @@ public class MemStoreFlusher implements FlushRequester {
};
/**
- * nn
- */
+ * */
public MemStoreFlusher(final Configuration conf, final HRegionServer server) {
super();
this.conf = conf;
@@ -536,8 +535,9 @@ public class MemStoreFlusher implements FlushRequester {
/**
* A flushRegion that checks store file count. If too many, puts the flush on delay queue to retry
- * later. n * @return true if the region was successfully flushed, false otherwise. If false,
- * there will be accompanying log messages explaining why the region was not flushed.
+ * later.
+ * @return true if the region was successfully flushed, false otherwise. If false, there will be
+ * accompanying log messages explaining why the region was not flushed.
*/
private boolean flushRegion(final FlushRegionEntry fqe) {
HRegion region = fqe.region;
@@ -790,7 +790,7 @@ public class MemStoreFlusher implements FlushRequester {
}
/**
- * Register a MemstoreFlushListener n
+ * Register a MemstoreFlushListener
*/
@Override
public void registerFlushRequestListener(final FlushRequestListener listener) {
@@ -798,8 +798,8 @@ public class MemStoreFlusher implements FlushRequester {
}
/**
- * Unregister the listener from MemstoreFlushListeners n * @return true when passed listener is
- * unregistered successfully.
+ * Unregister the listener from MemstoreFlushListeners
+ * @return true when passed listener is unregistered successfully.
*/
@Override
public boolean unregisterFlushRequestListener(final FlushRequestListener listener) {
@@ -807,7 +807,7 @@ public class MemStoreFlusher implements FlushRequester {
}
/**
- * Sets the global memstore limit to a new size. n
+ * Sets the global memstore limit to a new size.
*/
@Override
public void setGlobalMemStoreLimit(long globalMemStoreSize) {
@@ -843,9 +843,7 @@ public class MemStoreFlusher implements FlushRequester {
this.tracker = tracker;
}
- /**
- * n * @return True if we have been delayed > maximumWait milliseconds.
- */
+ /** Returns True if we have been delayed > maximumWait milliseconds. */
public boolean isMaximumWait(final long maximumWait) {
return (EnvironmentEdgeManager.currentTime() - this.createTime) > maximumWait;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
index 0152d1db38d..fd41515d2e9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
@@ -73,9 +73,7 @@ public class MiniBatchOperationInProgress {
return this.lastIndexExclusive - this.firstIndex;
}
- /**
- * n * @return The operation(Mutation) at the specified position.
- */
+ /** Returns The operation(Mutation) at the specified position. */
public T getOperation(int index) {
return operations[getAbsoluteIndex(index)];
}
@@ -83,29 +81,25 @@ public class MiniBatchOperationInProgress {
/**
* Sets the status code for the operation(Mutation) at the specified position. By setting this
* status, {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} can make HRegion to skip
- * Mutations. nn
+ * Mutations.
*/
public void setOperationStatus(int index, OperationStatus opStatus) {
this.retCodeDetails[getAbsoluteIndex(index)] = opStatus;
}
- /**
- * n * @return Gets the status code for the operation(Mutation) at the specified position.
- */
+ /** Returns the status code for the operation(Mutation) at the specified position. */
public OperationStatus getOperationStatus(int index) {
return this.retCodeDetails[getAbsoluteIndex(index)];
}
/**
- * Sets the walEdit for the operation(Mutation) at the specified position. nn
+ * Sets the walEdit for the operation(Mutation) at the specified position.
*/
public void setWalEdit(int index, WALEdit walEdit) {
this.walEditsFromCoprocessors[getAbsoluteIndex(index)] = walEdit;
}
- /**
- * n * @return Gets the walEdit for the operation(Mutation) at the specified position.
- */
+ /** Returns the walEdit for the operation(Mutation) at the specified position. */
public WALEdit getWalEdit(int index) {
return this.walEditsFromCoprocessors[getAbsoluteIndex(index)];
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
index 299ae4b77a1..95d344fc15d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
@@ -73,7 +73,7 @@ public class MultiVersionConcurrencyControl {
}
/**
- * Step the MVCC forward on to a new read/write basis. n
+ * Step the MVCC forward on to a new read/write basis.
*/
public void advanceTo(long newStartPoint) {
while (true) {
@@ -173,7 +173,7 @@ public class MultiVersionConcurrencyControl {
* this even if the write has FAILED (AFTER backing out the write transaction changes completely)
* so we can clean up the outstanding transaction. How much is the read point advanced? Let S be
* the set of all write numbers that are completed. Set the read point to the highest numbered
- * write of S. n *
+ * write of S.
* @return true if e is visible to MVCC readers (that is, readpoint >= e.writeNumber)
*/
public boolean complete(WriteEntry writeEntry) {
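A hedged sketch of the begin/complete pattern this Javadoc describes; "mvcc" is an assumed MultiVersionConcurrencyControl instance owned by the region:

  // Begin a write, apply the edit, then complete it (even on failure, after backing
  // the edit out) so the read point can advance past this write number.
  MultiVersionConcurrencyControl.WriteEntry e = mvcc.begin();
  try {
    // apply the edit tagged with e.getWriteNumber()
  } finally {
    mvcc.complete(e);
  }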
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableOnlineRegions.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableOnlineRegions.java
index 7a1dc1c382f..5c619b83a26 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableOnlineRegions.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableOnlineRegions.java
@@ -28,7 +28,7 @@ import org.apache.yetus.audience.InterfaceAudience;
public interface MutableOnlineRegions extends OnlineRegions {
/**
- * Add to online regions. n
+ * Add to online regions.
*/
void addRegion(final HRegion r);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java
index 51566611652..d16e409a2fd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java
@@ -33,13 +33,15 @@ public interface OnlineRegions {
/**
* Return {@link Region} instance. Only works if caller is in same context, in same JVM. Region is
- * not serializable. n * @return Region for the passed encoded encodedRegionName or
- * null if named region is not member of the online regions.
+ * not serializable.
+ * @return Region for the passed encoded encodedRegionName or null if named region is
+ * not member of the online regions.
*/
Region getRegion(String encodedRegionName);
/**
- * Get all online regions of a table in this RS. n * @return List of Region
+ * Get all online regions of a table in this RS.
+ * @return List of Region
* @throws java.io.IOException
*/
List extends Region> getRegions(TableName tableName) throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OperationStatus.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OperationStatus.java
index bf9b89dd65d..61b6bd0cf1b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OperationStatus.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OperationStatus.java
@@ -66,22 +66,19 @@ public class OperationStatus {
}
/**
- * n
- */
+ * Returns the operation status code. */
public OperationStatusCode getOperationStatusCode() {
return code;
}
/**
- * n
- */
+ * Returns the result. */
public Result getResult() {
return result;
}
/**
- * n
- */
+ * Returns the exception message. */
public String getExceptionMsg() {
return exceptionMsg;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 9f2a6eef520..d49c217fe33 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -907,7 +907,7 @@ public class RSRpcServices extends HBaseRpcServicesBase
}
/**
- * Execute a list of mutations. nnn
+ * Execute a list of mutations.
*/
private void doBatchOp(final RegionActionResult.Builder builder, final HRegion region,
final OperationQuota quota, final List mutations, final CellScanner cells,
@@ -1497,7 +1497,7 @@ public class RSRpcServices extends HBaseRpcServicesBase
/**
* Close a region on the region server.
* @param controller the RPC controller
- * @param request the request n
+ * @param request the request
*/
@Override
@QosPriority(priority = HConstants.ADMIN_QOS)
@@ -1529,7 +1529,7 @@ public class RSRpcServices extends HBaseRpcServicesBase
/**
* Compact a region on the region server.
* @param controller the RPC controller
- * @param request the request n
+ * @param request the request
*/
@Override
@QosPriority(priority = HConstants.ADMIN_QOS)
@@ -1593,7 +1593,7 @@ public class RSRpcServices extends HBaseRpcServicesBase
/**
* Flush a region on the region server.
* @param controller the RPC controller
- * @param request the request n
+ * @param request the request
*/
@Override
@QosPriority(priority = HConstants.ADMIN_QOS)
@@ -1773,7 +1773,7 @@ public class RSRpcServices extends HBaseRpcServicesBase
/**
* Get some information of the region server.
* @param controller the RPC controller
- * @param request the request n
+ * @param request the request
*/
@Override
@QosPriority(priority = HConstants.ADMIN_QOS)
@@ -1874,7 +1874,7 @@ public class RSRpcServices extends HBaseRpcServicesBase
* errors are put in the response as FAILED_OPENING.
*
* @param controller the RPC controller
- * @param request the request n
+ * @param request the request
*/
@Override
@QosPriority(priority = HConstants.ADMIN_QOS)
@@ -2249,7 +2249,7 @@ public class RSRpcServices extends HBaseRpcServicesBase
/**
* Roll the WAL writer of the region server.
* @param controller the RPC controller
- * @param request the request n
+ * @param request the request
*/
@Override
public RollWALWriterResponse rollWALWriter(final RpcController controller,
@@ -2270,7 +2270,7 @@ public class RSRpcServices extends HBaseRpcServicesBase
/**
* Stop the region server.
* @param controller the RPC controller
- * @param request the request n
+ * @param request the request
*/
@Override
@QosPriority(priority = HConstants.ADMIN_QOS)
@@ -2453,7 +2453,7 @@ public class RSRpcServices extends HBaseRpcServicesBase
/**
* Get data from a table.
* @param controller the RPC controller
- * @param request the get request n
+ * @param request the get request
*/
@Override
public GetResponse get(final RpcController controller, final GetRequest request)
@@ -2651,7 +2651,7 @@ public class RSRpcServices extends HBaseRpcServicesBase
/**
* Execute multiple actions on a table: get, mutate, and/or execCoprocessor
* @param rpcc the RPC controller
- * @param request the multi request n
+ * @param request the multi request
*/
@Override
public MultiResponse multi(final RpcController rpcc, final MultiRequest request)
@@ -3487,7 +3487,7 @@ public class RSRpcServices extends HBaseRpcServicesBase
/**
* Scan data in a table.
* @param controller the RPC controller
- * @param request the scan request n
+ * @param request the scan request
*/
@Override
public ScanResponse scan(final RpcController controller, final ScanRequest request)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
index 156d4054055..6a897a5b9f3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
@@ -106,7 +106,7 @@ public interface Region extends ConfigurationObserver {
/**
* Check the region's underlying store files, open the files that have not been opened yet, and
- * remove the store file readers for store files no longer available. n
+ * remove the store file readers for store files no longer available.
*/
boolean refreshStoreFiles() throws IOException;
@@ -219,7 +219,7 @@ public interface Region extends ConfigurationObserver {
* read lock and checks if the region is closing or closed.
*
* {@link #closeRegionOperation} MUST then always be called after the operation has completed,
- * whether it succeeded or failed. n
+ * whether it succeeded or failed.
*/
// TODO Exposing this and closeRegionOperation() as we have getRowLock() exposed.
// Remove if we get rid of exposing getRowLock().
@@ -231,18 +231,18 @@ public interface Region extends ConfigurationObserver {
*
* {@link #closeRegionOperation} MUST then always be called after the operation has completed,
* whether it succeeded or failed.
- * @param op The operation is about to be taken on the region n
+ * @param op The operation is about to be taken on the region
*/
void startRegionOperation(Operation op) throws IOException;
/**
- * Closes the region operation lock. n
+ * Closes the region operation lock.
*/
void closeRegionOperation() throws IOException;
/**
* Closes the region operation lock. This needs to be called in the finally block corresponding to
- * the try block of {@link #startRegionOperation(Operation)} n
+ * the try block of {@link #startRegionOperation(Operation)}
*/
void closeRegionOperation(Operation op) throws IOException;
@@ -282,7 +282,8 @@ public interface Region extends ConfigurationObserver {
// Region operations
/**
- * Perform one or more append operations on a row. n * @return result of the operation n
+ * Perform one or more append operations on a row.
+ * @return result of the operation
*/
Result append(Append append) throws IOException;
@@ -293,7 +294,7 @@ public interface Region extends ConfigurationObserver {
* previous operation in the same batch when performing the operations in the batch.
* @param mutations the list of mutations
* @return an array of OperationStatus which internally contains the OperationStatusCode and the
- * exceptionMessage if any. n
+ * exceptionMessage if any.
*/
OperationStatus[] batchMutate(Mutation[] mutations) throws IOException;
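A sketch of inspecting the per-mutation outcome returned by batchMutate; the Put/Delete instances and the region are assumed, and the status accessors are the OperationStatus getters touched later in this patch:

  OperationStatus[] statuses = region.batchMutate(new Mutation[] { put1, put2, delete1 });
  for (OperationStatus status : statuses) {
    if (status.getOperationStatusCode() != HConstants.OperationStatusCode.SUCCESS) {
      // handle or log the failure, e.g. via status.getExceptionMsg()
    }
  }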
@@ -453,7 +454,7 @@ public interface Region extends ConfigurationObserver {
CheckAndMutateResult checkAndMutate(CheckAndMutate checkAndMutate) throws IOException;
/**
- * Deletes the specified cells/row. nn
+ * Deletes the specified cells/row.
*/
void delete(Delete delete) throws IOException;
@@ -477,7 +478,8 @@ public interface Region extends ConfigurationObserver {
* specified by the {@link Scan}.
*
* This Iterator must be closed by the caller.
- * @param scan configured {@link Scan} n * @throws IOException read exceptions
+ * @param scan configured {@link Scan}
+ * @throws IOException read exceptions
*/
RegionScanner getScanner(Scan scan) throws IOException;
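Because the Javadoc above stresses that the scanner must be closed by the caller, a try-with-resources sketch is the natural shape (the region and a configured Scan are assumed):

  // RegionScanner is Closeable, so try-with-resources guarantees the close.
  try (RegionScanner scanner = region.getScanner(new Scan())) {
    List<Cell> cells = new ArrayList<>();
    boolean moreRows;
    do {
      moreRows = scanner.next(cells);  // fetch the next row's worth of cells
      // process "cells" here
      cells.clear();
    } while (moreRows);
  }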
@@ -489,8 +491,8 @@ public interface Region extends ConfigurationObserver {
*
* This Iterator must be closed by the caller.
* @param scan configured {@link Scan}
- * @param additionalScanners Any additional scanners to be used n * @throws IOException read
- * exceptions
+ * @param additionalScanners Any additional scanners to be used
+ * @throws IOException read exceptions
*/
RegionScanner getScanner(Scan scan, List additionalScanners) throws IOException;
@@ -498,7 +500,8 @@ public interface Region extends ConfigurationObserver {
CellComparator getCellComparator();
/**
- * Perform one or more increment operations on a row. n * @return result of the operation n
+ * Perform one or more increment operations on a row.
+ * @return result of the operation
*/
Result increment(Increment increment) throws IOException;
@@ -506,7 +509,7 @@ public interface Region extends ConfigurationObserver {
* Performs multiple mutations atomically on a single row.
* @param mutations object that specifies the set of mutations to perform atomically
* @return results of Increment/Append operations. If no Increment/Append operations, it returns
- * null n
+ * null
*/
Result mutateRow(RowMutations mutations) throws IOException;
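A short sketch of an atomic multi-mutation on a single row via RowMutations; the row, family and qualifier byte arrays are assumed:

  // Every mutation in the RowMutations is applied atomically to the same row.
  RowMutations rm = new RowMutations(row);
  rm.add(new Put(row).addColumn(family, qual1, value));
  rm.add(new Delete(row).addColumns(family, qual2));
  Result result = region.mutateRow(rm);  // null unless an Increment/Append was included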
@@ -519,7 +522,7 @@ public interface Region extends ConfigurationObserver {
* @param nonceGroup Optional nonce group of the operation (client Id)
* @param nonce Optional nonce of the operation (unique random id to ensure "more
* idempotence") If multiple rows are locked care should be taken that
- * rowsToLock is sorted in order to avoid deadlocks. n
+ * rowsToLock is sorted in order to avoid deadlocks.
*/
// TODO Should not be exposing with params nonceGroup, nonce. Change when doing the jira for
// Changing processRowsWithLocks
@@ -527,7 +530,7 @@ public interface Region extends ConfigurationObserver {
long nonceGroup, long nonce) throws IOException;
/**
- * Puts some data in the table. nn
+ * Puts some data in the table.
*/
void put(Put put) throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
index 42e1eaa886b..ef84ca31f1d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
@@ -328,7 +328,7 @@ public class RegionCoprocessorHost
/**
* Sanity check the table coprocessor attributes of the supplied schema. Will throw an exception
- * if there is a problem. nnn
+ * if there is a problem.
*/
public static void testTableCoprocessorAttrs(final Configuration conf, final TableDescriptor htd)
throws IOException {
@@ -560,7 +560,7 @@ public class RegionCoprocessorHost
* @param store The store where compaction is being requested
* @param candidates The currently available store files
* @param tracker used to track the life cycle of a compaction
- * @param user the user n
+ * @param user the user
*/
public boolean preCompactSelection(final HStore store, final List candidates,
final CompactionLifeCycleTracker tracker, final User user) throws IOException {
@@ -625,7 +625,7 @@ public class RegionCoprocessorHost
* @param tracker used to track the life cycle of a compaction
* @param request the compaction request
* @param user the user
- * @return Scanner to use (cannot be null!) n
+ * @return Scanner to use (cannot be null!)
*/
public InternalScanner preCompact(final HStore store, final InternalScanner scanner,
final ScanType scanType, final CompactionLifeCycleTracker tracker,
@@ -654,7 +654,7 @@ public class RegionCoprocessorHost
* @param resultFile the new store file written during compaction
* @param tracker used to track the life cycle of a compaction
* @param request the compaction request
- * @param user the user n
+ * @param user the user
*/
public void postCompact(final HStore store, final HStoreFile resultFile,
final CompactionLifeCycleTracker tracker, final CompactionRequest request, final User user)
@@ -688,7 +688,7 @@ public class RegionCoprocessorHost
/**
* Invoked before a memstore flush
- * @return Scanner to use (cannot be null!) n
+ * @return Scanner to use (cannot be null!)
*/
public InternalScanner preFlush(HStore store, InternalScanner scanner,
FlushLifeCycleTracker tracker) throws IOException {
@@ -709,7 +709,7 @@ public class RegionCoprocessorHost
}
/**
- * Invoked before a memstore flush n
+ * Invoked before a memstore flush
*/
public void preFlush(FlushLifeCycleTracker tracker) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult() {
@@ -721,7 +721,7 @@ public class RegionCoprocessorHost
}
/**
- * Invoked after a memstore flush n
+ * Invoked after a memstore flush
*/
public void postFlush(FlushLifeCycleTracker tracker) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult() {
@@ -788,7 +788,7 @@ public class RegionCoprocessorHost
}
/**
- * Invoked after a memstore flush n
+ * Invoked after a memstore flush
*/
public void postFlush(HStore store, HStoreFile storeFile, FlushLifeCycleTracker tracker)
throws IOException {
@@ -1270,8 +1270,8 @@ public class RegionCoprocessorHost
/**
* @param s the scanner
* @param results the result set returned by the region server
- * @param limit the maximum number of results to return n * @return 'has more' indication to
- * give to client
+ * @param limit the maximum number of results to return
+ * @return 'has more' indication to give to client
* @exception IOException Exception
*/
public boolean postScannerNext(final InternalScanner s, final List results,
@@ -1293,7 +1293,7 @@ public class RegionCoprocessorHost
* filter.
* @param s the scanner
* @param curRowCell The cell in the current row which got filtered out
- * @return whether more rows are available for the scanner or not n
+ * @return whether more rows are available for the scanner or not
*/
public boolean postScannerFilterRow(final InternalScanner s, final Cell curRowCell)
throws IOException {
@@ -1454,7 +1454,7 @@ public class RegionCoprocessorHost
/**
* @param familyPaths pairs of { CF, file path } submitted for bulk load
- * @param map Map of CF to List of file paths for the final loaded files n
+ * @param map Map of CF to List of file paths for the final loaded files
*/
public void postBulkLoadHFile(final List> familyPaths,
Map> map) throws IOException {
@@ -1491,10 +1491,10 @@ public class RegionCoprocessorHost
* @param fs filesystem to read from
* @param p path to the file
* @param in {@link FSDataInputStreamWrapper}
- * @param size Full size of the file n * @param r original reference file. This will be not null
- * only when reading a split file.
+ * @param size Full size of the file
+ * @param r original reference file. This will be not null only when reading a split file.
* @return a Reader instance to use instead of the base reader if overriding default behavior,
- * null otherwise n
+ * null otherwise
*/
public StoreFileReader preStoreFileReaderOpen(final FileSystem fs, final Path p,
final FSDataInputStreamWrapper in, final long size, final CacheConfig cacheConf,
@@ -1515,10 +1515,10 @@ public class RegionCoprocessorHost
* @param fs filesystem to read from
* @param p path to the file
* @param in {@link FSDataInputStreamWrapper}
- * @param size Full size of the file n * @param r original reference file. This will be not null
- * only when reading a split file.
+ * @param size Full size of the file
+ * @param r original reference file. This will be not null only when reading a split file.
* @param reader the base reader instance
- * @return The reader to use n
+ * @return The reader to use
*/
public StoreFileReader postStoreFileReaderOpen(final FileSystem fs, final Path p,
final FSDataInputStreamWrapper in, final long size, final CacheConfig cacheConf,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
index 693da0312fc..cea136a9a05 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
@@ -42,7 +42,7 @@ public interface RegionScanner extends InternalScanner {
/**
* Do a reseek to the required row. Should not be used to seek to a key which may come before the
- * current position. Always seeks to the beginning of a row boundary. nn * if row is null
+ * current position. Always seeks to the beginning of a row boundary. Throws an exception if
+ * row is null.
*/
boolean reseek(byte[] row) throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
index 6dd2980fdca..97ca8d41ad4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
@@ -91,7 +91,8 @@ public abstract class RegionSplitPolicy extends Configured {
}
/**
- * Create the RegionSplitPolicy configured for the given table. nn * @return a RegionSplitPolicy n
+ * Create the RegionSplitPolicy configured for the given table.
+ * @return a RegionSplitPolicy
*/
public static RegionSplitPolicy create(HRegion region, Configuration conf) throws IOException {
Preconditions.checkNotNull(region, "Region should not be null.");
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java
index b0dc152d129..5f893efd88f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java
@@ -39,7 +39,6 @@ public interface ReplicationSinkService extends ReplicationService {
* @param sourceBaseNamespaceDirPath Path that point to the source cluster base namespace
* directory required for replicating hfiles
* @param sourceHFileArchiveDirPath Path that point to the source cluster hfile archive directory
- * n
*/
void replicateLogEntries(List entries, CellScanner cells, String replicationClusterId,
String sourceBaseNamespaceDirPath, String sourceHFileArchiveDirPath) throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java
index b6fdd21b09c..60c634578aa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java
@@ -36,8 +36,7 @@ import org.apache.yetus.audience.InterfaceAudience;
public class ReversedKeyValueHeap extends KeyValueHeap {
/**
- * nnn
- */
+ * */
public ReversedKeyValueHeap(List extends KeyValueScanner> scanners, CellComparator comparator)
throws IOException {
super(scanners, new ReversedKVScannerComparator(comparator));
@@ -150,7 +149,7 @@ public class ReversedKeyValueHeap extends KeyValueHeap {
private static class ReversedKVScannerComparator extends KVScannerComparator {
/**
- * Constructor n
+ * Constructor
*/
public ReversedKVScannerComparator(CellComparator kvComparator) {
super(kvComparator);
@@ -166,7 +165,8 @@ public class ReversedKeyValueHeap extends KeyValueHeap {
}
/**
- * Compares rows of two KeyValue nn * @return less than 0 if left is smaller, 0 if equal etc..
+ * Compares rows of two KeyValues
+ * @return less than 0 if left is smaller, 0 if equal, etc.
*/
public int compareRows(Cell left, Cell right) {
return super.kvComparator.compareRows(left, right);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java
index eb1df834c7f..d0ea2e08d17 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java
@@ -36,8 +36,9 @@ public class ReversedStoreScanner extends StoreScanner implements KeyValueScanne
/**
* Opens a scanner across memstore, snapshot, and all StoreFiles. Assumes we are not in a
* compaction.
- * @param store who we scan n * @param scan the spec
- * @param columns which columns we are scanning n
+ * @param store who we scan
+ * @param scan the spec
+ * @param columns which columns we are scanning
*/
public ReversedStoreScanner(HStore store, ScanInfo scanInfo, Scan scan,
NavigableSet columns, long readPt) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java
index 7376854ccbd..b0c49711032 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java
@@ -53,7 +53,7 @@ public class ScanInfo {
+ (4 * Bytes.SIZEOF_LONG) + (4 * Bytes.SIZEOF_BOOLEAN));
/**
- * n * @param family {@link ColumnFamilyDescriptor} describing the column family
+ * @param family {@link ColumnFamilyDescriptor} describing the column family
* @param ttl Store's TTL (in ms)
* @param timeToPurgeDeletes duration in ms after which a delete marker can be purged during a
* major compaction.
@@ -75,7 +75,7 @@ public class ScanInfo {
}
/**
- * n * @param family Name of this store's column family
+ * @param family Name of this store's column family
* @param minVersions Store's MIN_VERSIONS setting
* @param maxVersions Store's VERSIONS setting
* @param ttl Store's TTL (in ms)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
index 2de6dfb8819..c9655d7fafd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
@@ -55,7 +55,7 @@ public class ScannerContext {
/**
* A different set of progress fields. Only include batch, dataSize and heapSize. Compare to
* LimitFields, ProgressFields doesn't contain time field. As we save a deadline in LimitFields,
- * so use {@link EnvironmentEdgeManager.currentTime()} directly when check time limit.
+ * so use {@link EnvironmentEdgeManager#currentTime()} directly when check time limit.
*/
ProgressFields progress;
@@ -214,8 +214,8 @@ public class ScannerContext {
* Note that this is not a typical setter. This setter returns the {@link NextState} that was
* passed in so that methods can be invoked against the new state. Furthermore, this pattern
* allows the {@link NoLimitScannerContext} to cleanly override this setter and simply return the
- * new state, thus preserving the immutability of {@link NoLimitScannerContext} n * @return The
- * state that was passed in.
+ * new state, thus preserving the immutability of {@link NoLimitScannerContext}
+ * @return The state that was passed in.
*/
NextState setScannerState(NextState state) {
if (!NextState.isValidState(state)) {
@@ -236,32 +236,24 @@ public class ScannerContext {
|| scannerState == NextState.BATCH_LIMIT_REACHED;
}
- /**
- * n * @return true if the batch limit can be enforced in the checker's scope
- */
+ /** Returns true if the batch limit can be enforced in the checker's scope */
boolean hasBatchLimit(LimitScope checkerScope) {
return limits.canEnforceBatchLimitFromScope(checkerScope) && limits.getBatch() > 0;
}
- /**
- * n * @return true if the size limit can be enforced in the checker's scope
- */
+ /** Returns true if the size limit can be enforced in the checker's scope */
boolean hasSizeLimit(LimitScope checkerScope) {
return limits.canEnforceSizeLimitFromScope(checkerScope)
&& (limits.getDataSize() > 0 || limits.getHeapSize() > 0);
}
- /**
- * n * @return true if the time limit can be enforced in the checker's scope
- */
+ /** Returns true if the time limit can be enforced in the checker's scope */
boolean hasTimeLimit(LimitScope checkerScope) {
return limits.canEnforceTimeLimitFromScope(checkerScope)
&& (limits.getTime() > 0 || returnImmediately);
}
- /**
- * n * @return true if any limit can be enforced within the checker's scope
- */
+ /** Returns true if any limit can be enforced within the checker's scope */
boolean hasAnyLimit(LimitScope checkerScope) {
return hasBatchLimit(checkerScope) || hasSizeLimit(checkerScope) || hasTimeLimit(checkerScope);
}
@@ -578,9 +570,7 @@ public class ScannerContext {
this.batch = batch;
}
- /**
- * n * @return true when the limit can be enforced from the scope of the checker
- */
+ /** Returns true when the limit can be enforced from the scope of the checker */
boolean canEnforceBatchLimitFromScope(LimitScope checkerScope) {
return LimitScope.BETWEEN_CELLS.canEnforceLimitFromScope(checkerScope);
}
@@ -613,9 +603,7 @@ public class ScannerContext {
this.sizeScope = scope;
}
- /**
- * n * @return true when the limit can be enforced from the scope of the checker
- */
+ /** Returns true when the limit can be enforced from the scope of the checker */
boolean canEnforceSizeLimitFromScope(LimitScope checkerScope) {
return this.sizeScope.canEnforceLimitFromScope(checkerScope);
}
@@ -640,9 +628,7 @@ public class ScannerContext {
this.timeScope = scope;
}
- /**
- * n * @return true when the limit can be enforced from the scope of the checker
- */
+ /** Returns true when the limit can be enforced from the scope of the checker */
boolean canEnforceTimeLimitFromScope(LimitScope checkerScope) {
return this.timeScope.canEnforceLimitFromScope(checkerScope);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
index 01d95ad863f..a6a67fb9139 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
@@ -459,8 +459,8 @@ public class SecureBulkLoadManager {
}
/**
- * Check if the path is referencing a file. This is mainly needed to avoid symlinks. n * @return
- * true if the p is a file n
+ * Check if the path is referencing a file. This is mainly needed to avoid symlinks.
+ * @return true if {@code p} is a file
*/
private boolean isFile(Path p) throws IOException {
FileStatus status = srcFs.getFileStatus(p);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java
index 4a392b86a96..a05ac364fc0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java
@@ -62,7 +62,7 @@ public final class SegmentFactory {
* create empty immutable segment for initializations This ImmutableSegment is used as a place
* holder for snapshot in Memstore. It won't flush later, So it is not necessary to record the
* initial size for it.
- * @param comparator comparator n
+ * @param comparator comparator
*/
public ImmutableSegment createImmutableSegment(CellComparator comparator) {
MutableSegment segment = generateMutableSegment(null, comparator, null, null);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ShipperListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ShipperListener.java
index 0881a80d510..fc85b5fa22a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ShipperListener.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ShipperListener.java
@@ -28,7 +28,7 @@ import org.apache.yetus.audience.InterfaceAudience;
public interface ShipperListener {
/**
- * The action that needs to be performed before {@link Shipper#shipped()} is performed n
+ * The action that needs to be performed before {@link Shipper#shipped()} is performed
*/
void beforeShipped() throws IOException;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java
index d84bf1c2d24..7bf4f3d2957 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java
@@ -67,8 +67,8 @@ public class ShutdownHook {
*
* To suppress all shutdown hook handling -- both the running of the regionserver hook and of the
* hdfs hook code -- set {@link ShutdownHook#RUN_SHUTDOWN_HOOK} in {@link Configuration} to
- * false . This configuration value is checked when the hook code runs. n * @param fs
- * Instance of Filesystem used by the RegionServer
+ * false . This configuration value is checked when the hook code runs.
+ * @param fs Instance of Filesystem used by the RegionServer
* @param stop Installed shutdown hook will call stop against this passed
* Stoppable instance.
* @param threadToJoin After calling stop on stop will then join this thread.
@@ -241,7 +241,7 @@ public class ShutdownHook {
/**
* Main to test basic functionality. Run with clean hadoop 0.20 and hadoop 0.21 and cloudera
* patched hadoop to make sure our shutdown hook handling works for all compbinations. Pass
- * '-Dhbase.shutdown.hook=false' to test turning off the running of shutdown hooks. nn
+ * '-Dhbase.shutdown.hook=false' to test turning off the running of shutdown hooks.
*/
public static void main(final String[] args) throws IOException {
Configuration conf = HBaseConfiguration.create();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
index cd6676b563c..39cf9eacd2e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
@@ -201,7 +201,7 @@ public interface Store {
/**
* Checks the underlying store files, and opens the files that have not been opened, and removes
* the store file readers for store files no longer available. Mainly used by secondary region
- * replicas to keep up to date with the primary region files. n
+ * replicas to keep up to date with the primary region files.
*/
void refreshStoreFiles() throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
index 5c915e6b177..4b6a375fdb9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
@@ -234,14 +234,14 @@ public class StoreFileInfo implements Configurable {
}
/**
- * Size of the Hfile n
+ * Size of the Hfile
*/
public long getSize() {
return size;
}
/**
- * Sets the region coprocessor env. n
+ * Sets the region coprocessor env.
*/
public void setRegionCoprocessorHost(RegionCoprocessorHost coprocessorHost) {
this.coprocessorHost = coprocessorHost;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
index 75fb8978c44..36c67f41a3e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
@@ -196,8 +196,8 @@ public class StoreFileReader {
* @deprecated since 2.0.0 and will be removed in 3.0.0. Do not write further code which depends
* on this call. Instead use getStoreFileScanner() which uses the StoreFileScanner
* class/interface which is the preferred way to scan a store with higher level
- * concepts. n * should we cache the blocks? n * use pread (for concurrent small
- * readers) n * is scanner being used for compaction?
+ * concepts. The boolean arguments indicate whether we should cache the blocks, whether to use
+ * pread (for concurrent small readers), and whether the scanner is being used for compaction.
* @return the underlying HFileScanner
* @see HBASE-15296
*/
@@ -320,7 +320,7 @@ public class StoreFileReader {
/**
* A method for checking Bloom filters. Called directly from StoreFileScanner in case of a
- * multi-column query. n * the cell to check if present in BloomFilter
+ * multi-column query.
+ * @param cell the cell to check if present in BloomFilter
* @return True if passes
*/
public boolean passesGeneralRowColBloomFilter(Cell cell) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index ce2a3d6f249..74147f8ec05 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -299,9 +299,7 @@ public class StoreFileScanner implements KeyValueScanner {
closed = true;
}
- /**
- * nn * @return false if not found or if k is after the end. n
- */
+ /** Returns false if not found or if k is after the end. */
public static boolean seekAtOrAfter(HFileScanner s, Cell k) throws IOException {
int result = s.seekTo(k);
if (result < 0) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java
index c750167d2ae..9e748e4e7d9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java
@@ -46,7 +46,7 @@ interface StoreFlushContext {
/**
* Commit the flush - add the store file to the store and clear the memstore snapshot. Requires
* pausing scans. A very short operation
- * @return whether compaction is required n
+ * @return whether compaction is required
*/
boolean commit(MonitoredTask status) throws IOException;
@@ -55,12 +55,12 @@ interface StoreFlushContext {
* primary region. Adds the new files to the store, and drops the snapshot depending on
* dropMemstoreSnapshot argument.
* @param fileNames names of the flushed files
- * @param dropMemstoreSnapshot whether to drop the prepared memstore snapshot n
+ * @param dropMemstoreSnapshot whether to drop the prepared memstore snapshot
*/
void replayFlush(List fileNames, boolean dropMemstoreSnapshot) throws IOException;
/**
- * Abort the snapshot preparation. Drops the snapshot if any. n
+ * Abort the snapshot preparation. Drops the snapshot if any.
*/
void abort() throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 2cf9fe04cf3..5cfe6742007 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -225,7 +225,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
* compaction.
* @param store who we scan
* @param scan the spec
- * @param columns which columns we are scanning n
+ * @param columns which columns we are scanning
*/
public StoreScanner(HStore store, ScanInfo scanInfo, Scan scan, NavigableSet columns,
long readPt) throws IOException {
@@ -384,8 +384,9 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
}
/**
- * Seek the specified scanners with the given key nn * @param isLazy true if using lazy seek
- * @param isParallelSeek true if using parallel seek n
+ * Seek the specified scanners with the given key
+ * @param isLazy true if using lazy seek
+ * @param isParallelSeek true if using parallel seek
*/
protected void seekScanners(List extends KeyValueScanner> scanners, Cell seekKey,
boolean isLazy, boolean isParallelSeek) throws IOException {
@@ -530,8 +531,8 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
}
/**
- * Get the next row of values from this Store. nn * @return true if there are more rows, false if
- * scanner is done
+ * Get the next row of values from this Store.
+ * @return true if there are more rows, false if scanner is done
*/
@Override
public boolean next(List outResult, ScannerContext scannerContext) throws IOException {
@@ -1051,7 +1052,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
}
/**
- * Check whether scan as expected order nnnn
+ * Check whether the scan is proceeding in the expected order
*/
protected void checkScanOrder(Cell prevKV, Cell kv, CellComparator comparator)
throws IOException {
@@ -1065,8 +1066,8 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
}
/**
- * Do a reseek in a normal StoreScanner(scan forward) n * @return true if scanner has values left,
- * false if end of scanner n
+ * Do a reseek in a normal StoreScanner(scan forward)
+ * @return true if scanner has values left, false if end of scanner
*/
protected boolean seekAsDirection(Cell kv) throws IOException {
return reseek(kv);
@@ -1157,7 +1158,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
/**
* Seek storefiles in parallel to optimize IO latency as much as possible
* @param scanners the list {@link KeyValueScanner}s to be read from
- * @param kv the KeyValue on which the operation is being requested n
+ * @param kv the KeyValue on which the operation is being requested
*/
private void parallelSeek(final List extends KeyValueScanner> scanners, final Cell kv)
throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java
index 3f4f27380ff..51807658f2a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java
@@ -183,7 +183,7 @@ public abstract class TimeRangeTracker {
/**
* @param data the serialization data. It can't be null!
* @return An instance of NonSyncTimeRangeTracker filled w/ the content of serialized
- * NonSyncTimeRangeTracker in timeRangeTrackerBytes . n
+ * NonSyncTimeRangeTracker in timeRangeTrackerBytes .
*/
public static TimeRangeTracker parseFrom(final byte[] data) throws IOException {
return parseFrom(data, Type.NON_SYNC);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java
index 13bc27c7abc..f81072a12c0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java
@@ -47,7 +47,7 @@ public class CompactionProgress {
}
/**
- * getter for calculated percent complete n
+ * getter for calculated percent complete
*/
public float getProgressPct() {
return (float) currentCompactedKVs / getTotalCompactingKVs();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java
index 2b54081642f..142ee02b5da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java
@@ -165,8 +165,12 @@ public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy {
}
/**
- * Check that all files satisfy the constraint FileSize(i) <= ( Sum(0,N,FileSize(_)) - FileSize(i)
- * ) * Ratio.
+ * Check that all files satisfy the constraint
+ *
+ * FileSize(i) <= ( Sum(0,N,FileSize(_)) - FileSize(i) ) * Ratio.
+ *
* @param files List of store files to consider as a compaction candidate.
* @param currentRatio The ratio to use.
* @return a boolean if these files satisfy the ratio constraints.
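To make the constraint concrete: with file sizes 10, 20 and 100 and a ratio of 1.2, the 100-unit file violates it because 100 > (130 - 100) * 1.2 = 36. A small, hedged sketch of the check over raw sizes (the real policy operates on store file objects, and this is not its exact code):

import java.util.List;

final class RatioCheck {
  // True when every file satisfies FileSize(i) <= ( Sum(all) - FileSize(i) ) * ratio.
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = 0;
    for (long size : fileSizes) {
      total += size;
    }
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(filesInRatio(List.of(10L, 20L, 100L), 1.2)); // false: 100 > 30 * 1.2
    System.out.println(filesInRatio(List.of(10L, 20L, 30L), 1.2));  // true
  }
}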
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
index 1d039de96fb..d83107a10b9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
@@ -118,9 +118,7 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy {
public abstract boolean shouldPerformMajorCompaction(Collection filesToCompact)
throws IOException;
- /**
- * n * @return When to run next major compaction
- */
+ /** Returns when to run the next major compaction */
public long getNextMajorCompactTime(Collection filesToCompact) {
/** Default to {@link org.apache.hadoop.hbase.HConstants#DEFAULT_MAJOR_COMPACTION_PERIOD}. */
long period = comConf.getMajorCompactionPeriod();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java
index af02d71788f..db4d8050862 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java
@@ -104,20 +104,21 @@ public interface ColumnTracker extends ShipperListener {
ColumnCount getColumnHint();
/**
- * Retrieve the MatchCode for the next row or column n
+ * Retrieve the MatchCode for the next row or column
*/
MatchCode getNextRowOrNextColumn(Cell cell);
/**
* Give the tracker a chance to declare it's done based on only the timestamp to allow an early
- * out. n * @return true to early out based on timestamp.
+ * out.
+ * @return true to early out based on timestamp.
*/
boolean isDone(long timestamp);
/**
* This method is used to inform the column tracker that we are done with this column. We may get
* this information from external filters or timestamp range and we then need to indicate this
- * information to tracker. It is currently implemented for ExplicitColumnTracker. n
+ * information to tracker. It is currently implemented for ExplicitColumnTracker.
*/
default void doneWithColumn(Cell cell) {
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java
index 3557973aae5..8fdee2da524 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java
@@ -96,7 +96,7 @@ public class ScanDeleteTracker implements DeleteTracker {
/**
* Check if the specified Cell buffer has been deleted by a previously seen delete.
- * @param cell - current cell to check if deleted by a previously seen delete n
+ * @param cell - current cell to check if deleted by a previously seen delete
*/
@Override
public DeleteResult isDeleted(Cell cell) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java
index 120b01cca3a..614465c1827 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java
@@ -138,9 +138,7 @@ public abstract class ScanQueryMatcher implements ShipperListener {
this.columns = columns;
}
- /**
- * nn * @return true if the cell is expired
- */
+ /** Returns true if the cell is expired */
private static boolean isCellTTLExpired(final Cell cell, final long oldestTimestamp,
final long now) {
// Look for a TTL tag first. Use it instead of the family setting if
@@ -262,7 +260,7 @@ public abstract class ScanQueryMatcher implements ShipperListener {
protected abstract void reset();
/**
- * Set the row when there is change in row n
+ * Set the row when there is a change in row
*/
public void setToNewRow(Cell currentRow) {
this.currentRow = currentRow;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java
index 4d84e5a0fdf..ea0afee2178 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java
@@ -184,7 +184,7 @@ public class ScanWildcardColumnTracker implements ColumnTracker {
}
/**
- * We can never know a-priori if we are done, so always return false. n
+ * We can never know a-priori if we are done, so always return false.
*/
@Override
public boolean done() {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
index 2cb5b7e6f47..8d26583a0df 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
@@ -129,7 +129,7 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager {
/**
* Close this and all running snapshot tasks
- * @param force forcefully stop all running tasks n
+ * @param force forcefully stop all running tasks
*/
@Override
public void stop(boolean force) throws IOException {
@@ -147,7 +147,8 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager {
* If in a running state, creates the specified subprocedure for handling an online snapshot.
* Because this gets the local list of regions to snapshot and not the set the master had, there
* is a possibility of a race where regions may be missed. This detected by the master in the
- * snapshot verification step. n * @return Subprocedure to submit to the ProcedureMember.
+ * snapshot verification step.
+ * @return Subprocedure to submit to the ProcedureMember.
*/
public Subprocedure buildSubprocedure(SnapshotDescription snapshot) {
@@ -209,8 +210,9 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager {
* miss some regions. For example, a region move during a snapshot could result in a region to be
* skipped or done twice. This is manageable because the {@link MasterSnapshotVerifier} will
* double check the region lists after the online portion of the snapshot completes and will
- * explicitly fail the snapshot. n * @return the list of online regions. Empty list is returned if
- * no regions are responsible for the given snapshot. n
+ * explicitly fail the snapshot.
+ * @return the list of online regions. Empty list is returned if no regions are responsible for
+ * the given snapshot.
*/
private List getRegionsToSnapshot(SnapshotDescription snapshot) throws IOException {
List onlineRegions =
@@ -292,8 +294,8 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager {
/**
* Wait for all of the currently outstanding tasks submitted via {@link #submitTask(Callable)}.
* This *must* be called after all tasks are submitted via submitTask.
- * @return true on success, false otherwise n * @throws
- * SnapshotCreationException if the snapshot failed while we were waiting
+ * @return true on success, false otherwise
+ * @throws SnapshotCreationException if the snapshot failed while we were waiting
*/
boolean waitForOutstandingTasks() throws ForeignException, InterruptedException {
LOG.debug("Waiting for local region snapshots to finish.");
@@ -336,7 +338,7 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager {
}
/**
- * This attempts to cancel out all pending and in progress tasks (interruptions issues) n
+ * This attempts to cancel all pending and in-progress tasks by issuing interruptions
*/
void cancelTasks() throws InterruptedException {
Collection> tasks = futures;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index 77708c39590..10a200972b3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -608,7 +608,7 @@ public abstract class AbstractFSWAL implements WAL {
/**
* This is a convenience method that computes a new filename with a given file-number.
- * @param filenum to use n
+ * @param filenum to use
*/
protected Path computeFilename(final long filenum) {
if (filenum < 0) {
@@ -620,7 +620,7 @@ public abstract class AbstractFSWAL implements WAL {
/**
* This is a convenience method that computes a new filename with a given using the current WAL
- * file-number n
+ * file-number
*/
public Path getCurrentFileName() {
return computeFilename(this.filenum.get());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
index 42dcb51e1e7..12f0efc5728 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
@@ -271,7 +271,7 @@ public class ProtobufLogReader extends ReaderBase {
*
*
* In case the trailer size > this.trailerMaxSize, it is read after a WARN message.
- * @return true if a valid trailer is present n
+ * @return true if a valid trailer is present
*/
private boolean setTrailerIfPresent() {
try {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java
index 3040655e813..14d6a973939 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java
@@ -162,7 +162,8 @@ class SequenceIdAccounting {
/**
* We've been passed a new sequenceid for the region. Set it as highest seen for this region and
* if we are to record oldest, or lowest sequenceids, save it as oldest seen if nothing currently
- * older. nnn * @param lowest Whether to keep running account of oldest sequence id.
+ * older.
+ * @param lowest Whether to keep running account of oldest sequence id.
*/
void update(byte[] encodedRegionName, Set families, long sequenceid,
final boolean lowest) {
@@ -260,8 +261,8 @@ class SequenceIdAccounting {
}
/**
- * n * @return New Map that has same keys as src but instead of a Map for a value, it
- * instead has found the smallest sequence id and it returns that as the value instead.
+ * @return New Map that has same keys as src but instead of a Map for a value, it
+ * instead has found the smallest sequence id and it returns that as the value instead.
*/
private > Map flattenToLowestSequenceId(Map src) {
if (src == null || src.isEmpty()) {
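The @return description above is easier to see as code: the method collapses a map whose values are themselves maps of sequence ids into a map from the same keys to the smallest sequence id in each inner map. A hedged, stand-alone sketch (the generic types here are assumptions; the actual signature's generics were stripped in this diff):

import java.util.HashMap;
import java.util.Map;

final class FlattenExample {
  // For each key in src, keep only the smallest sequence id among its inner map's values.
  static <K> Map<K, Long> flattenToLowestSequenceId(Map<K, Map<byte[], Long>> src) {
    Map<K, Long> flattened = new HashMap<>();
    if (src == null || src.isEmpty()) {
      return flattened;
    }
    for (Map.Entry<K, Map<byte[], Long>> entry : src.entrySet()) {
      long lowest = Long.MAX_VALUE;
      for (long seqId : entry.getValue().values()) {
        lowest = Math.min(lowest, seqId);
      }
      if (lowest != Long.MAX_VALUE) {
        flattened.put(entry.getKey(), lowest);
      }
    }
    return flattened;
  }
}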
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
index 90825d4884c..954c4c87820 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
@@ -93,7 +93,7 @@ class SyncFuture {
/**
* Call this method to clear old usage and get it ready for new deploy.
- * @param txid the new transaction id n
+ * @param txid the new transaction id
*/
SyncFuture reset(long txid, boolean forceSync) {
if (t != null && t != Thread.currentThread()) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
index 5b9c3da7cf8..564f43324cc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
@@ -123,7 +123,7 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint
}
/**
- * A private method used to re-establish a zookeeper session with a peer cluster. n
+ * A private method used to re-establish a zookeeper session with a peer cluster.
*/
private void reconnect(KeeperException ke) {
if (
@@ -296,8 +296,8 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint
}
/**
- * Report that a {@code SinkPeer} successfully replicated a chunk of data. n * The SinkPeer that
- * had a failed replication attempt on it
+ * Report that a {@code SinkPeer} successfully replicated a chunk of data.
+ * @param sinkPeer The SinkPeer that successfully replicated the chunk of data
*/
protected synchronized void reportSinkSuccess(SinkPeer sinkPeer) {
badReportCounts.remove(sinkPeer.getServerName());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
index b2dbd591fc9..4636e239904 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
@@ -143,7 +143,7 @@ public class DumpReplicationQueues extends Configured implements Tool {
}
/**
- * Main nn
+ * Main
*/
public static void main(String[] args) throws Exception {
Configuration conf = HBaseConfiguration.create();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java
index 1be425abc34..6e6036bf8da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java
@@ -62,7 +62,7 @@ public class MetricsSink {
}
/**
- * Convience method to change metrics when a batch of operations are applied. n
+ * Convenience method to change metrics when a batch of operations is applied.
*/
public void applyBatch(long batchSize) {
mss.incrAppliedBatches(1);
@@ -87,14 +87,14 @@ public class MetricsSink {
}
/**
- * Get the count of the failed bathes n
+ * Get the count of the failed batches
*/
protected long getFailedBatches() {
return mss.getFailedBatches();
}
/**
- * Get the Age of Last Applied Op n
+ * Get the Age of Last Applied Op
*/
public long getAgeOfLastAppliedOp() {
return mss.getLastAppliedOpAge();
@@ -110,14 +110,14 @@ public class MetricsSink {
}
/**
- * Gets the time stamp from when the Sink was initialized. n
+ * Gets the time stamp from when the Sink was initialized.
*/
public long getStartTimestamp() {
return this.startTimestamp;
}
/**
- * Gets the total number of OPs delivered to this sink. n
+ * Gets the total number of OPs delivered to this sink.
*/
public long getAppliedOps() {
return this.mss.getSinkAppliedOps();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java
index 7f7282f4e71..14a753791da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java
@@ -127,7 +127,7 @@ public class MetricsSource implements BaseSource {
/**
* get age of last shipped op of given wal group. If the walGroup is null, return 0
- * @param walGroup which group we are getting n
+ * @param walGroup which group we are getting
*/
public long getAgeOfLastShippedOp(String walGroup) {
return this.ageOfLastShippedOp.get(walGroup) == null ? 0 : ageOfLastShippedOp.get(walGroup);
@@ -283,28 +283,28 @@ public class MetricsSource implements BaseSource {
}
/**
- * Get AgeOfLastShippedOp n
+ * Get AgeOfLastShippedOp
*/
public Long getAgeOfLastShippedOp() {
return singleSourceSource.getLastShippedAge();
}
/**
- * Get the sizeOfLogQueue n
+ * Get the sizeOfLogQueue
*/
public int getSizeOfLogQueue() {
return singleSourceSource.getSizeOfLogQueue();
}
/**
- * Get the value of uncleanlyClosedWAL counter n
+ * Get the value of uncleanlyClosedWAL counter
*/
public long getUncleanlyClosedWALs() {
return singleSourceSource.getUncleanlyClosedWALs();
}
/**
- * Get the timestampsOfLastShippedOp, if there are multiple groups, return the latest one n
+ * Get the timestampsOfLastShippedOp, if there are multiple groups, return the latest one
*/
public long getTimestampOfLastShippedOp() {
long lastTimestamp = 0L;
@@ -351,7 +351,7 @@ public class MetricsSource implements BaseSource {
}
/**
- * Get the slave peer ID n
+ * Get the slave peer ID
*/
public String getPeerID() {
return id;
@@ -497,7 +497,7 @@ public class MetricsSource implements BaseSource {
}
/**
- * Returns the amount of memory in bytes used in this RegionServer by edits pending replication. n
+ * Returns the amount of memory in bytes used in this RegionServer by edits pending replication.
*/
public long getWALReaderEditsBufferUsage() {
return globalSourceSource.getWALReaderEditsBufferBytes();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
index 997e303d583..439322e252c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
@@ -458,7 +458,7 @@ public class ReplicationSink {
}
/**
- * Get replication Sink Metrics n
+ * Get replication Sink Metrics
*/
public MetricsSink getSinkMetrics() {
return this.metrics;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
index 4434c33b52c..611376e9ce3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
@@ -112,7 +112,7 @@ public interface RSGroupInfoManager {
/**
* Rename rsgroup
* @param oldName old rsgroup name
- * @param newName new rsgroup name n
+ * @param newName new rsgroup name
*/
void renameRSGroup(String oldName, String newName) throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java
index 8912c34c51b..57d156ab1c2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java
@@ -458,7 +458,7 @@ public class AccessChecker {
/**
* Retrieve the groups of the given user.
- * @param user User name n
+ * @param user User name
*/
public static List getUserGroups(String user) {
try {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index a4438460fce..3e4aca26329 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -430,7 +430,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
/**
* Determine if cell ACLs covered by the operation grant access. This is expensive.
- * @return false if cell ACLs failed to grant access, true otherwise n
+ * @return false if cell ACLs failed to grant access, true otherwise
*/
private boolean checkCoveringPermission(User user, OpType request, RegionCoprocessorEnvironment e,
byte[] row, Map> familyMap, long opTs, Action... actions)
@@ -1024,7 +1024,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
}
/**
- * Create the ACL table n
+ * Create the ACL table
*/
private static void createACLTable(Admin admin) throws IOException {
/** Table descriptor for ACL table */
@@ -1817,7 +1817,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
/**
* Authorization check for SecureBulkLoadProtocol.prepareBulkLoad()
- * @param ctx the context n
+ * @param ctx the context
*/
@Override
public void prePrepareBulkLoad(ObserverContext ctx)
@@ -1829,7 +1829,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
/**
* Authorization security check for SecureBulkLoadProtocol.cleanupBulkLoad()
- * @param ctx the context n
+ * @param ctx the context
*/
@Override
public void preCleanupBulkLoad(ObserverContext ctx)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java
index a24e9d66ced..023eccbd27d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java
@@ -490,7 +490,7 @@ public final class AuthManager {
}
/**
- * Last modification logical time n
+ * Last modification logical time
*/
public long getMTime() {
return mtime.get();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
index 5d674c78cdc..7bdef4b6649 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
@@ -254,7 +254,7 @@ public class ZKPermissionWatcher extends ZKListener implements Closeable {
}
/***
- * Write a table's access controls to the permissions mirror in zookeeper nn
+ * Write a table's access controls to the permissions mirror in zookeeper
*/
public void writeToZookeeper(byte[] entry, byte[] permsData) {
String entryName = Bytes.toString(entry);
@@ -271,7 +271,7 @@ public class ZKPermissionWatcher extends ZKListener implements Closeable {
}
/***
- * Delete the acl notify node of table n
+ * Delete the acl notify node of table
*/
public void deleteTableACLNode(final TableName tableName) {
String zkNode = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, ACL_NODE);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
index b6163123cbf..9d9f90765c7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
@@ -320,7 +320,8 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService
/**
* Adds the mutations to labels region and set the results to the finalOpStatus. finalOpStatus
* might have some entries in it where the OpStatus is FAILURE. We will leave those and set in
- * others in the order. nn * @return whether we need a ZK update or not.
+ * others in the order.
+ * @return whether we need a ZK update or not.
*/
private boolean mutateLabelsRegion(List mutations, OperationStatus[] finalOpStatus)
throws IOException {
@@ -687,7 +688,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService
}
/**
- * n * - all the visibility tags associated with the current Cell
+ * @param tags - all the visibility tags associated with the current Cell
* @return - the modified visibility expression as byte[]
*/
private byte[] createModifiedVisExpression(final List tags) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ScanLabelGenerator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ScanLabelGenerator.java
index 3f969ef64f4..d635e43e27f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ScanLabelGenerator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ScanLabelGenerator.java
@@ -30,7 +30,8 @@ import org.apache.yetus.audience.InterfaceAudience;
public interface ScanLabelGenerator extends Configurable {
/**
- * Helps to get a list of lables associated with an UGI nn * @return The labels
+ * Helps to get a list of labels associated with an UGI
+ * @return The labels
*/
public List getLabels(User user, Authorizations authorizations);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
index 7fa8a4ec8c5..c5aa902de3d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
@@ -1013,9 +1013,7 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso
}
}
- /**
- * n * @return NameValuePair of the exception name to stringified version os exception.
- */
+ /** Returns NameValuePair of the exception name to stringified version of the exception. */
// Copied from ResponseConverter and made private. Only used in here.
private static NameBytesPair buildException(final Throwable t) {
NameBytesPair.Builder parameterBuilder = NameBytesPair.newBuilder();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelService.java
index a55ab2aae22..ecf19a52dff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelService.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelService.java
@@ -36,40 +36,40 @@ public interface VisibilityLabelService extends Configurable {
/**
* System calls this after opening of regions. Gives a chance for the VisibilityLabelService to so
- * any initialization logic. n * the region coprocessor env
+ * any initialization logic.
+ * @param e the region coprocessor env
*/
void init(RegionCoprocessorEnvironment e) throws IOException;
/**
- * Adds the set of labels into the system. n * Labels to add to the system.
+ * Adds the set of labels into the system.
+ * @param labels Labels to add to the system.
* @return OperationStatus for each of the label addition
*/
OperationStatus[] addLabels(List labels) throws IOException;
/**
- * Sets given labels globally authorized for the user. n * The authorizing user n * Labels which
- * are getting authorized for the user
+ * Sets given labels globally authorized for the user.
+ * @param user The authorizing user
+ * @param authLabels Labels which are getting authorized for the user
* @return OperationStatus for each of the label auth addition
*/
OperationStatus[] setAuths(byte[] user, List authLabels) throws IOException;
/**
- * Removes given labels from user's globally authorized list of labels. n * The user whose
- * authorization to be removed n * Labels which are getting removed from authorization set
+ * Removes given labels from user's globally authorized list of labels.
+ * @param user The user whose authorization to be removed
+ * @param authLabels Labels which are getting removed from authorization set
* @return OperationStatus for each of the label auth removal
*/
OperationStatus[] clearAuths(byte[] user, List authLabels) throws IOException;
/**
- * Retrieve the visibility labels for the user. n * Name of the user whose authorization to be
- * retrieved n * Whether a system or user originated call.
+ * Retrieve the visibility labels for the user.
+ * @param user Name of the user whose authorization to be retrieved
+ * @param systemCall Whether a system or user originated call.
* @return Visibility labels authorized for the given user.
*/
List getUserAuths(byte[] user, boolean systemCall) throws IOException;
/**
- * Retrieve the visibility labels for the groups. n * Name of the groups whose authorization to be
- * retrieved n * Whether a system or user originated call.
+ * Retrieve the visibility labels for the groups.
+ * @param groups Name of the groups whose authorization to be retrieved
+ * @param systemCall Whether a system or user originated call.
* @return Visibility labels authorized for the given group.
*/
List getGroupAuths(String[] groups, boolean systemCall) throws IOException;
@@ -101,7 +101,7 @@ public interface VisibilityLabelService extends Configurable {
/**
* Creates VisibilityExpEvaluator corresponding to given Authorizations.
* Note: This will be concurrently called from multiple threads and implementation should take
- * care of thread safety. n * Authorizations for the read request
+ * care of thread safety.
+ * @param authorizations Authorizations for the read request
* @return The VisibilityExpEvaluator corresponding to the given set of authorization labels.
*/
VisibilityExpEvaluator getVisibilityExpEvaluator(Authorizations authorizations)
@@ -110,8 +110,8 @@ public interface VisibilityLabelService extends Configurable {
/**
* System checks for user auth during admin operations. (ie. Label add, set/clear auth). The
* operation is allowed only for users having system auth. Also during read, if the requesting
- * user has system auth, he can view all the data irrespective of its labels. n * User for whom
- * system auth check to be done.
+ * user has system auth, he can view all the data irrespective of its labels.
+ * @param user User for whom system auth check to be done.
* @return true if the given user is having system/super auth
*/
boolean havingSystemAuth(User user) throws IOException;
@@ -121,12 +121,11 @@ public interface VisibilityLabelService extends Configurable {
* in Delete mutation and the cell in consideration. Also system passes the serialization format
* of visibility tags in Put and Delete.
* Note: This will be concurrently called from multiple threads and implementation should take
- * care of thread safety. n * The visibility tags present in the Put mutation n * The
- * serialization format for the Put visibility tags. A null value for this format
- * means the tags are written with unsorted label ordinals n * - The visibility tags in the delete
- * mutation (the specified Cell Visibility) n * The serialization format for the Delete visibility
- * tags. A null value for this format means the tags are written with unsorted label
- * ordinals
+ * care of thread safety. The caller passes the visibility tags present in the Put mutation and
+ * the serialization format for the Put visibility tags (a null value for this format means the
+ * tags are written with unsorted label ordinals), along with the visibility tags in the delete
+ * mutation (the specified Cell Visibility) and the serialization format for the Delete visibility
+ * tags (a null value again means the tags are written with unsorted label ordinals).
* @return true if matching tags are found
* @see VisibilityConstants#SORTED_ORDINAL_SERIALIZATION_FORMAT
*/
@@ -138,9 +137,9 @@ public interface VisibilityLabelService extends Configurable {
* are part of the cell created from the WALEdits that are prepared for replication while calling
* {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} .replicate().
* {@link org.apache.hadoop.hbase.security.visibility.VisibilityReplicationEndpoint} calls this
- * API to provide an opportunity to modify the visibility tags before replicating. n * the
- * visibility tags associated with the cell n * the serialization format associated with the tag
- * @return the modified visibility expression in the form of byte[] n
+ * API to provide an opportunity to modify the visibility tags before replicating.
+ * @param visTags the visibility tags associated with the cell
+ * @param serializationFormat the serialization format associated with the tag
+ * @return the modified visibility expression in the form of byte[]
*/
byte[] encodeVisibilityForReplication(final List visTags, final Byte serializationFormat)
throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelServiceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelServiceManager.java
index ec009116a6b..490f18a2e5e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelServiceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelServiceManager.java
@@ -48,9 +48,8 @@ public class VisibilityLabelServiceManager {
}
/**
- * n * @return singleton instance of {@link VisibilityLabelService}. The FQCN of the
- * implementation class can be specified using
- * "hbase.regionserver.visibility.label.service.class".
+ * @return singleton instance of {@link VisibilityLabelService}. The FQCN of the implementation
+ * class can be specified using "hbase.regionserver.visibility.label.service.class".
* @throws IOException When VLS implementation, as specified in conf, can not be loaded.
*/
public VisibilityLabelService getVisibilityLabelService(Configuration conf) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java
index 4b1d2f4d84f..49c93da4381 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java
@@ -95,7 +95,7 @@ public class VisibilityLabelsCache implements VisibilityLabelOrdinalProvider {
}
/**
- * @return Singleton instance of VisibilityLabelsCache n * when this is called before calling
+ * @return Singleton instance of VisibilityLabelsCache. It is an error to call this before calling
* {@link #createAndGet(ZKWatcher, Configuration)}
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MS_EXPOSE_REP",
@@ -250,8 +250,8 @@ public class VisibilityLabelsCache implements VisibilityLabelOrdinalProvider {
}
/**
- * Returns the list of ordinals of labels associated with the groups n * @return the list of
- * ordinals
+ * Returns the list of ordinals of labels associated with the groups
+ * @return the list of ordinals
*/
public Set getGroupAuthsAsOrdinals(String[] groups) {
this.lock.readLock().lock();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java
index ef84e10afdd..d450228ea3e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java
@@ -86,8 +86,8 @@ public class VisibilityUtils {
private static final ExpressionExpander EXP_EXPANDER = new ExpressionExpander();
/**
- * Creates the labels data to be written to zookeeper. n * @return Bytes form of labels and their
- * ordinal details to be written to zookeeper.
+ * Creates the labels data to be written to zookeeper.
+ * @return Bytes form of labels and their ordinal details to be written to zookeeper.
*/
public static byte[] getDataToWriteToZooKeeper(Map existingLabels) {
VisibilityLabelsRequest.Builder visReqBuilder = VisibilityLabelsRequest.newBuilder();
@@ -101,8 +101,8 @@ public class VisibilityUtils {
}
/**
- * Creates the user auth data to be written to zookeeper. n * @return Bytes form of user auths
- * details to be written to zookeeper.
+ * Creates the user auth data to be written to zookeeper.
+ * @return Bytes form of user auths details to be written to zookeeper.
*/
public static byte[] getUserAuthsDataToWriteToZooKeeper(Map> userAuths) {
MultiUserAuthorizations.Builder builder = MultiUserAuthorizations.newBuilder();
@@ -119,8 +119,8 @@ public class VisibilityUtils {
/**
* Reads back from the zookeeper. The data read here is of the form written by
- * writeToZooKeeper(Map<byte[], Integer> entries). n * @return Labels and their ordinal
- * details n
+ * writeToZooKeeper(Map<byte[], Integer> entries).
+ * @return Labels and their ordinal details
*/
public static List readLabelsFromZKData(byte[] data)
throws DeserializationException {
@@ -138,7 +138,8 @@ public class VisibilityUtils {
}
/**
- * Reads back User auth data written to zookeeper. n * @return User auth details n
+ * Reads back User auth data written to zookeeper.
+ * @return User auth details
*/
public static MultiUserAuthorizations readUserAuthsFromZKData(byte[] data)
throws DeserializationException {
@@ -159,8 +160,8 @@ public class VisibilityUtils {
* @param conf The configuration to use
* @return Stack of ScanLabelGenerator instances. ScanLabelGenerator classes can be specified in
* Configuration as comma separated list using key
- * "hbase.regionserver.scan.visibility.label.generator.class" n * when any of the
- * specified ScanLabelGenerator class can not be loaded.
+ * "hbase.regionserver.scan.visibility.label.generator.class". The lookup fails when any of the
+ * specified ScanLabelGenerator classes can not be loaded.
*/
public static List getScanLabelGenerators(Configuration conf) {
// There can be n SLG specified as comma separated in conf
@@ -218,8 +219,8 @@ public class VisibilityUtils {
/**
* Extracts and partitions the visibility tags and nonVisibility Tags
* @param cell - the cell for which we would extract and partition the visibility and non
- * visibility tags n * - all the visibilty tags of type
- * TagType.VISIBILITY_TAG_TYPE would be added to this list
+ * visibility tags
+ * @param visTags - all the visibility tags of type TagType.VISIBILITY_TAG_TYPE would be added to
+ * this list
* @param nonVisTags - all the non visibility tags would be added to this list
* @return - the serailization format of the tag. Can be null if no tags are found or if there is
* no visibility tag found
@@ -363,8 +364,8 @@ public class VisibilityUtils {
/**
* This will sort the passed labels in ascending oder and then will write one after the other to
- * the passed stream. n * Unsorted label ordinals n * Stream where to write the labels. n * When
- * IOE during writes to Stream.
+ * the passed stream.
+ * @param labelOrdinals Unsorted label ordinals
+ * @param dos Stream where to write the labels
+ * @throws IOException When IOE during writes to Stream.
*/
private static void writeLabelOrdinalsToStream(List labelOrdinals, DataOutputStream dos)
throws IOException {
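The behaviour the comment describes — sort the ordinals in ascending order, then write them one after another to the stream — is easy to sketch on its own. Fixed-width writeInt is an assumption here; the actual method may use a different (for example variable-length) integer encoding:

import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Collections;
import java.util.List;

final class LabelOrdinalWriter {
  // Sort ascending, then write each ordinal to the stream in order.
  static void writeLabelOrdinalsToStream(List<Integer> labelOrdinals, DataOutputStream dos)
      throws IOException {
    Collections.sort(labelOrdinals);
    for (Integer ordinal : labelOrdinals) {
      dos.writeInt(ordinal);
    }
  }
}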
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ZKVisibilityLabelWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ZKVisibilityLabelWatcher.java
index 3150dc448f9..4092036c2f6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ZKVisibilityLabelWatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ZKVisibilityLabelWatcher.java
@@ -131,8 +131,8 @@ public class ZKVisibilityLabelWatcher extends ZKListener {
}
/**
- * Write a labels mirror or user auths mirror into zookeeper n * @param labelsOrUserAuths true for
- * writing labels and false for user auths.
+ * Write a labels mirror or user auths mirror into zookeeper
+ * @param labelsOrUserAuths true for writing labels and false for user auths.
*/
public void writeToZookeeper(byte[] data, boolean labelsOrUserAuths) {
String znode = this.labelZnode;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
index fbbd704b0ae..8395456cd76 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
@@ -649,7 +649,7 @@ public class RestoreSnapshotHelper {
* Clone region directory content from the snapshot info. Each region is encoded with the table
* name, so the cloned region will have a different region name. Instead of copying the hfiles a
* HFileLink is created.
- * @param regionDir {@link Path} cloned dir n
+ * @param regionDir {@link Path} cloned dir
*/
private void cloneRegion(final RegionInfo newRegionInfo, final Path regionDir,
final RegionInfo snapshotRegionInfo, final SnapshotRegionManifest manifest) throws IOException {
@@ -696,7 +696,7 @@ public class RestoreSnapshotHelper {
* Clone region directory content from the snapshot info. Each region is encoded with the table
* name, so the cloned region will have a different region name. Instead of copying the hfiles a
* HFileLink is created.
- * @param region {@link HRegion} cloned n
+ * @param region {@link HRegion} cloned
*/
private void cloneRegion(final HRegion region, final RegionInfo snapshotRegionInfo,
final SnapshotRegionManifest manifest) throws IOException {
@@ -861,7 +861,7 @@ public class RestoreSnapshotHelper {
}
/**
- * Copy the snapshot files for a snapshot scanner, discards meta changes. nnnnnn
+ * Copy the snapshot files for a snapshot scanner, discards meta changes.
*/
public static RestoreMetaChanges copySnapshotForScanner(Configuration conf, FileSystem fs,
Path rootDir, Path restoreDir, String snapshotName) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
index 6564809303a..474c437f78b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
@@ -95,8 +95,7 @@ public final class SnapshotDescriptionUtils {
public static class CompletedSnaphotDirectoriesFilter extends FSUtils.BlackListDirFilter {
/**
- * n
- */
+ */
public CompletedSnaphotDirectoriesFilter(FileSystem fs) {
super(fs, Collections.singletonList(SNAPSHOT_TMP_DIR_NAME));
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java
index f0453b01112..94c58dde4e0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java
@@ -42,7 +42,7 @@ public abstract class BloomContext {
}
/**
- * Bloom information from the cell is retrieved nn
+ * Bloom information from the cell is retrieved
*/
public void writeBloom(Cell cell) throws IOException {
// only add to the bloom filter on a new, unique key
@@ -62,7 +62,7 @@ public abstract class BloomContext {
}
/**
- * Adds the last bloom key to the HFile Writer as part of StorefileWriter close. nn
+ * Adds the last bloom key to the HFile Writer as part of StorefileWriter close.
*/
public abstract void addLastBloomKey(HFile.Writer writer) throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java
index 9478a99c9b7..e57c302e6b6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java
@@ -46,8 +46,8 @@ import org.apache.yetus.audience.InterfaceAudience;
* number of bits in the Bloom filter (bitSize) n denotes the number of elements inserted into the
* Bloom filter (maxKeys) k represents the number of hash functions used (nbHash) e represents the
* desired false positive rate for the bloom (err) If we fix the error rate (e) and know the number
- * of entries, then the optimal bloom size m = -(n * ln(err) / (ln(2)^2) ~= n * ln(err) / ln(0.6185)
- * The probability of false positives is minimized when k = m/n ln(2).
+ * of entries, then the optimal bloom size m = -(n * ln(err)) / (ln(2)^2) ~= n * ln(err) / ln(0.6185)
+ * The probability of false positives is minimized when k = m/n ln(2).
* @see BloomFilter The general behavior of a filter
* @see Space/Time
* Trade-Offs in Hash Coding with Allowable Errors
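Written out, the relationship the corrected comment describes (note that ln(0.6185) ≈ -(ln 2)^2, so the two forms of m agree, and the factor n belongs in both):

```latex
m = \frac{-n\,\ln(\mathrm{err})}{(\ln 2)^2} \approx \frac{n\,\ln(\mathrm{err})}{\ln(0.6185)},
\qquad k_{\mathrm{opt}} = \frac{m}{n}\,\ln 2
```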
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java
index e09420cf805..bc0ca049093 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java
@@ -92,7 +92,7 @@ public class BloomFilterChunk implements BloomFilterBase {
* @param hashType Type of hash function to use
* @param foldFactor When finished adding entries, you may be able to 'fold' this bloom to save
* space. Tradeoff potentially excess bytes in bloom for ability to fold if
- * keyCount is exponentially greater than maxKeys. n
+ * keyCount is exponentially greater than maxKeys.
*/
// Used only in testcases
public BloomFilterChunk(int maxKeys, double errorRate, int hashType, int foldFactor)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
index da3adbc8558..ed0e51f84e2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
@@ -82,7 +82,7 @@ public final class BloomFilterFactory {
* data.
* @param meta the byte array holding the Bloom filter's metadata, including version information
* @param reader the {@link HFile} reader to use to lazily load Bloom filter blocks
- * @return an instance of the correct type of Bloom filter n
+ * @return an instance of the correct type of Bloom filter
*/
public static BloomFilter createFromMeta(DataInput meta, HFile.Reader reader)
throws IllegalArgumentException, IOException {
@@ -135,10 +135,10 @@ public final class BloomFilterFactory {
/**
* Creates a new general (Row or RowCol) Bloom filter at the time of
- * {@link org.apache.hadoop.hbase.regionserver.HStoreFile} writing. nnn * @param maxKeys an
- * estimate of the number of keys we expect to insert. Irrelevant if compound Bloom filters are
- * enabled.
- * @param writer the HFile writer
+ * {@link org.apache.hadoop.hbase.regionserver.HStoreFile} writing.
+ * @param maxKeys an estimate of the number of keys we expect to insert. Irrelevant if compound
+ * Bloom filters are enabled.
+ * @param writer the HFile writer
* @return the new Bloom filter, or null in case Bloom filters are disabled or when failed to
* create one.
*/
@@ -176,10 +176,10 @@ public final class BloomFilterFactory {
/**
* Creates a new Delete Family Bloom filter at the time of
- * {@link org.apache.hadoop.hbase.regionserver.HStoreFile} writing. nn * @param maxKeys an
- * estimate of the number of keys we expect to insert. Irrelevant if compound Bloom filters are
- * enabled.
- * @param writer the HFile writer
+ * {@link org.apache.hadoop.hbase.regionserver.HStoreFile} writing.
+ * @param maxKeys an estimate of the number of keys we expect to insert. Irrelevant if compound
+ * Bloom filters are enabled.
+ * @param writer the HFile writer
* @return the new Bloom filter, or null in case Bloom filters are disabled or when failed to
* create one.
*/
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java
index 7b8a5cd241a..5b24a271474 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java
@@ -60,9 +60,9 @@ public final class BloomFilterUtil {
}
/**
- * nn * @return the number of bits for a Bloom filter than can hold the given number of keys and
- * provide the given error rate, assuming that the optimal number of hash functions is used and it
- * does not have to be an integer.
+ * @return the number of bits for a Bloom filter that can hold the given number of keys and
+ * provide the given error rate, assuming that the optimal number of hash functions is
+ * used and it does not have to be an integer.
*/
public static long computeBitSize(long maxKeys, double errorRate) {
return (long) Math.ceil(maxKeys * (-Math.log(errorRate) / LOG2_SQUARED));
@@ -85,8 +85,8 @@ public final class BloomFilterUtil {
/**
* The maximum number of keys we can put into a Bloom filter of a certain size to maintain the
* given error rate, assuming the number of hash functions is chosen optimally and does not even
- * have to be an integer (hence the "ideal" in the function name). nn * @return maximum number of
- * keys that can be inserted into the Bloom filter
+ * have to be an integer (hence the "ideal" in the function name).
+ * @return maximum number of keys that can be inserted into the Bloom filter
* @see #computeMaxKeys(long, double, int) for a more precise estimate
*/
public static long idealMaxKeys(long bitSize, double errorRate) {
@@ -97,9 +97,9 @@ public final class BloomFilterUtil {
/**
* The maximum number of keys we can put into a Bloom filter of a certain size to get the given
- * error rate, with the given number of hash functions. nnn * @return the maximum number of keys
- * that can be inserted in a Bloom filter to maintain the target error rate, if the number of hash
- * functions is provided.
+ * error rate, with the given number of hash functions.
+ * @return the maximum number of keys that can be inserted in a Bloom filter to maintain the
+ * target error rate, if the number of hash functions is provided.
*/
public static long computeMaxKeys(long bitSize, double errorRate, int hashCount) {
return (long) (-bitSize * 1.0 / hashCount
@@ -110,7 +110,8 @@ public final class BloomFilterUtil {
* Computes the actual error rate for the given number of elements, number of bits, and number of
* hash functions. Taken directly from the
* Wikipedia
- * Bloom filter article. nnn * @return the actual error rate
+ * Bloom filter article.
+ * @return the actual error rate
*/
public static double actualErrorRate(long maxKeys, long bitSize, int functionCount) {
return Math
@@ -118,8 +119,8 @@ public final class BloomFilterUtil {
}
/**
- * Increases the given byte size of a Bloom filter until it can be folded by the given factor. nn
- * * @return Foldable byte size
+ * Increases the given byte size of a Bloom filter until it can be folded by the given factor.
+ * @return Foldable byte size
*/
public static int computeFoldableByteSize(long bitSize, int foldFactor) {
long byteSizeLong = (bitSize + 7) / 8;
@@ -150,8 +151,8 @@ public final class BloomFilterUtil {
* @param byteSizeHint the desired number of bytes for the Bloom filter bit array. Will be
* increased so that folding is possible.
* @param errorRate target false positive rate of the Bloom filter
- * @param hashType Bloom filter hash function type nn * @return the new Bloom filter of the
- * desired size
+ * @param hashType Bloom filter hash function type
+ * @return the new Bloom filter of the desired size
*/
public static BloomFilterChunk createBySize(int byteSizeHint, double errorRate, int hashType,
int foldFactor, BloomType bloomType) {
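A small self-contained sketch of the sizing math these javadocs describe (formulas only; the class and method names below are illustrative, not `BloomFilterUtil` itself):

```java
// Bloom sizing math: bits for a target false-positive rate, keys a given bit
// budget can hold, and the actual FP rate for a chosen hash count.
public class BloomMathSketch {
  private static final double LOG2_SQUARED = Math.log(2) * Math.log(2);

  /** Bits needed to hold maxKeys at the target false-positive rate. */
  static long computeBitSize(long maxKeys, double errorRate) {
    return (long) Math.ceil(maxKeys * (-Math.log(errorRate) / LOG2_SQUARED));
  }

  /** Max keys a filter of bitSize bits can hold at errorRate, with optimal hash count. */
  static long idealMaxKeys(long bitSize, double errorRate) {
    return (long) (bitSize * (LOG2_SQUARED / -Math.log(errorRate)));
  }

  /** Actual false-positive rate for maxKeys entries, bitSize bits, k hash functions. */
  static double actualErrorRate(long maxKeys, long bitSize, int k) {
    return Math.pow(1 - Math.exp(-k * (double) maxKeys / bitSize), k);
  }

  public static void main(String[] args) {
    long bits = computeBitSize(1_000_000, 0.01);   // ~9.6M bits for 1M keys at 1% FP
    System.out.println(bits + " bits; ideal max keys back: " + idealMaxKeys(bits, 0.01));
    System.out.println("actual FP with k=7: " + actualErrorRate(1_000_000, bits, 7));
  }
}
```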
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java
index 15f94ecc46b..c0a9685d13e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java
@@ -134,8 +134,8 @@ public class DirectMemoryUtils {
* Every once a while, the JVM checks the reference queue and cleans the DirectByteBuffers.
* However, as this doesn't happen immediately after discarding all references to a
* DirectByteBuffer, it's easy to OutOfMemoryError yourself using DirectByteBuffers. This function
- * explicitly calls the Cleaner method of a DirectByteBuffer. n * The DirectByteBuffer that will
- * be "cleaned". Utilizes reflection.
+ * explicitly calls the Cleaner method of a DirectByteBuffer. Utilizes reflection.
+ * @param toBeDestroyed The DirectByteBuffer that will be "cleaned"
*/
public static void destroyDirectByteBuffer(ByteBuffer toBeDestroyed)
throws IllegalArgumentException, IllegalAccessException, InvocationTargetException,
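A hedged usage sketch of the cleaner call described above (the buffer size is arbitrary and the wrapper class is illustrative only):

```java
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.util.DirectMemoryUtils;

// Free a direct buffer's native memory eagerly instead of waiting for GC.
public class DirectBufferCleanupSketch {
  public static void main(String[] args) throws Exception {
    ByteBuffer bb = ByteBuffer.allocateDirect(64 * 1024 * 1024); // 64 MB off-heap
    // ... use the buffer ...
    DirectMemoryUtils.destroyDirectByteBuffer(bb); // invokes the Cleaner via reflection
  }
}
```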
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java
index faacab9cb92..192343ae41d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java
@@ -45,7 +45,7 @@ public class EncryptionTest {
}
/**
- * Check that the configured key provider can be loaded and initialized, or throw an exception. nn
+ * Check that the configured key provider can be loaded and initialized, or throw an exception.
*/
public static void testKeyProvider(final Configuration conf) throws IOException {
String providerClassName =
@@ -67,7 +67,6 @@ public class EncryptionTest {
/**
* Check that the configured cipher provider can be loaded and initialized, or throw an exception.
- * nn
*/
public static void testCipherProvider(final Configuration conf) throws IOException {
String providerClassName =
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index 59c5c032567..a3b1e375fbb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -131,8 +131,8 @@ public final class FSUtils {
* Compare path component of the Path URI; e.g. if hdfs://a/b/c and /a/b/c, it will compare the
* '/a/b/c' part. If you passed in 'hdfs://a/b/c and b/c, it would return true. Does not consider
* schema; i.e. if schemas different but path or subpath matches, the two will equate.
- * @param pathToSearch Path we will be trying to match. n * @return True if pathTail
- * is tail on the path of pathToSearch
+ * @param pathToSearch Path we will be trying to match.
+ * @return True if pathTail is tail on the path of pathToSearch
*/
public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
Path tailPath = pathTail;
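A short illustration of the tail-matching behavior the javadoc describes (expected results are inferred from the javadoc, not from running this code):

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.FSUtils;

// Scheme is ignored; only the trailing path components are compared.
public class TailMatchExample {
  public static void main(String[] args) {
    Path pathToSearch = new Path("hdfs://a/b/c");
    System.out.println(FSUtils.isMatchingTail(pathToSearch, new Path("/a/b/c"))); // true: full path
    System.out.println(FSUtils.isMatchingTail(pathToSearch, new Path("b/c")));    // true: tail only
  }
}
```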
@@ -164,7 +164,7 @@ public final class FSUtils {
/**
* Delete the region directory if exists.
- * @return True if deleted the region directory. n
+ * @return True if deleted the region directory.
*/
public static boolean deleteRegionDir(final Configuration conf, final RegionInfo hri)
throws IOException {
@@ -252,14 +252,14 @@ public final class FSUtils {
/**
* Inquire the Active NameNode's safe mode status.
* @param dfs A DistributedFileSystem object representing the underlying HDFS.
- * @return whether we're in safe mode n
+ * @return whether we're in safe mode
*/
private static boolean isInSafeMode(DistributedFileSystem dfs) throws IOException {
return dfs.setSafeMode(SAFEMODE_GET, true);
}
/**
- * Check whether dfs is in safemode. nn
+ * Check whether dfs is in safemode.
*/
public static void checkDfsSafeMode(final Configuration conf) throws IOException {
boolean isInSafeMode = false;
@@ -565,8 +565,7 @@ public final class FSUtils {
}
/**
- * nn
- */
+ */
private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final Path p,
final ClusterId cid) throws IOException {
// Rewrite the file as pb. Move aside the old one first, write new
@@ -931,8 +930,8 @@ public final class FSUtils {
}
/**
- * nn * @return All the table directories under rootdir . Ignore non table hbase
- * folders such as .logs, .oldlogs, .corrupt folders. n
+ * @return All the table directories under rootdir, ignoring non-table folders such as
+ *         .logs, .oldlogs and .corrupt.
*/
public static List getLocalTableDirs(final FileSystem fs, final Path rootdir)
throws IOException {
@@ -978,7 +977,7 @@ public final class FSUtils {
* .tableinfo
* @param fs A file system for the Path
* @param tableDir Path to a specific table directory <hbase.rootdir>/<tabledir>
- * @return List of paths to valid region directories in table dir. n
+ * @return List of paths to valid region directories in table dir.
*/
public static List getRegionDirs(final FileSystem fs, final Path tableDir)
throws IOException {
@@ -1043,7 +1042,7 @@ public final class FSUtils {
* Given a particular region dir, return all the familydirs inside it
* @param fs A file system for the Path
* @param regionDir Path to a specific region directory
- * @return List of paths to valid family directories in region dir. n
+ * @return List of paths to valid family directories in region dir.
*/
public static List getFamilyDirs(final FileSystem fs, final Path regionDir)
throws IOException {
@@ -1191,7 +1190,7 @@ public final class FSUtils {
* @param hbaseRootDir The root directory to scan.
* @param tableName name of the table to scan.
* @return Map keyed by StoreFile name with a value of the full Path.
- * @throws IOException When scanning the directory fails. n
+ * @throws IOException When scanning the directory fails.
*/
public static Map getTableStoreFilePathMap(Map map,
final FileSystem fs, final Path hbaseRootDir, TableName tableName)
@@ -1447,7 +1446,7 @@ public final class FSUtils {
* @param progressReporter Instance or null; gets called every time we move to new region of
* family dir and for each store file.
* @return Map keyed by StoreFile name with a value of the full Path.
- * @throws IOException When scanning the directory fails. n
+ * @throws IOException When scanning the directory fails.
*/
public static Map getTableStoreFilePathMap(final FileSystem fs,
final Path hbaseRootDir, PathFilter sfFilter, ExecutorService executor,
@@ -1533,10 +1532,10 @@ public final class FSUtils {
/**
* This function is to scan the root path of the file system to get the degree of locality for
* each region on each of the servers having at least one block of that region. This is used by
- * the tool {@link org.apache.hadoop.hbase.master.RegionPlacementMaintainer} n * the configuration
- * to use
- * @return the mapping from region encoded name to a map of server names to locality fraction n *
- * in case of file system errors or interrupts
+ * the tool {@link org.apache.hadoop.hbase.master.RegionPlacementMaintainer}.
+ * @param conf the configuration to use
+ * @return the mapping from region encoded name to a map of server names to locality fraction
+ * @throws IOException in case of file system errors or interrupts
*/
public static Map>
getRegionDegreeLocalityMappingFromFS(final Configuration conf) throws IOException {
@@ -1547,11 +1546,10 @@ public final class FSUtils {
/**
* This function is to scan the root path of the file system to get the degree of locality for
- * each region on each of the servers having at least one block of that region. n * the
- * configuration to use n * the table you wish to scan locality for n * the thread pool size to
- * use
- * @return the mapping from region encoded name to a map of server names to locality fraction n *
- * in case of file system errors or interrupts
+ * each region on each of the servers having at least one block of that region.
+ * @param conf the configuration to use
+ * @param desiredTable the table you wish to scan locality for
+ * @param threadPoolSize the thread pool size to use
+ * @return the mapping from region encoded name to a map of server names to locality fraction
+ * @throws IOException in case of file system errors or interrupts
*/
public static Map> getRegionDegreeLocalityMappingFromFS(
final Configuration conf, final String desiredTable, int threadPoolSize) throws IOException {
@@ -1564,9 +1562,9 @@ public final class FSUtils {
* This function is to scan the root path of the file system to get either the mapping between the
* region name and its best locality region server or the degree of locality of each region on
* each of the servers having at least one block of that region. The output map parameters are
- * both optional. n * the configuration to use n * the table you wish to scan locality for n * the
- * thread pool size to use n * the map into which to put the locality degree mapping or null, must
- * be a thread-safe implementation n * in case of file system errors or interrupts
+ * both optional. Takes the configuration to use, the table you wish to scan locality for, the
+ * thread pool size to use, and the map into which to put the locality degree mapping (or null;
+ * must be a thread-safe implementation). Throws in case of file system errors or interrupts.
*/
private static void getRegionLocalityMappingFromFS(final Configuration conf,
final String desiredTable, int threadPoolSize,
@@ -1666,7 +1664,7 @@ public final class FSUtils {
/**
* Do our short circuit read setup. Checks buffer size to use and whether to do checksumming in
- * hbase or hdfs. n
+ * hbase or hdfs.
*/
public static void setupShortCircuitRead(final Configuration conf) {
// Check that the user has not set the "dfs.client.read.shortcircuit.skip.checksum" property.
@@ -1686,7 +1684,7 @@ public final class FSUtils {
}
/**
- * Check if short circuit read buffer size is set and if not, set it to hbase value. n
+ * Check if short circuit read buffer size is set and if not, set it to hbase value.
*/
public static void checkShortCircuitReadBufferSize(final Configuration conf) {
final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2;
@@ -1702,8 +1700,7 @@ public final class FSUtils {
}
/**
- * n * @return The DFSClient DFSHedgedReadMetrics instance or null if can't be found or not on
- * hdfs. n
+ * Returns the DFSClient DFSHedgedReadMetrics instance, or null if it can't be found or not on hdfs.
*/
public static DFSHedgedReadMetrics getDFSHedgedReadMetrics(final Configuration c)
throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 75572b4c53e..382da13759a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -341,8 +341,7 @@ public class HBaseFsck extends Configured implements Closeable {
}
/**
- * Constructor n * Configuration object n * if the master is not running n * if unable to connect
- * to ZooKeeper
+ * Constructor
+ * @param conf Configuration object
+ * @throws MasterNotRunningException if the master is not running
+ * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper
*/
public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,
ZooKeeperConnectionException, IOException, ClassNotFoundException {
@@ -1320,8 +1319,8 @@ public class HBaseFsck extends Configured implements Closeable {
}
/**
- * To get the column family list according to the column family dirs nn * @return a set of column
- * families n
+ * To get the column family list according to the column family dirs
+ * @return a set of column families
*/
private Set getColumnFamilyList(Set columns, HbckRegionInfo hbi)
throws IOException {
@@ -1341,7 +1340,6 @@ public class HBaseFsck extends Configured implements Closeable {
* 2. the correct colfamily list
* 3. the default properties for both {@link TableDescriptor} and
* {@link ColumnFamilyDescriptor}
- * n
*/
private boolean fabricateTableInfo(FSTableDescriptors fstd, TableName tableName,
Set columns) throws IOException {
@@ -1356,7 +1354,6 @@ public class HBaseFsck extends Configured implements Closeable {
/**
* To fix the empty REGIONINFO_QUALIFIER rows from hbase:meta
- * n
*/
public void fixEmptyMetaCells() throws IOException {
if (shouldFixEmptyMetaCells() && !emptyRegionInfoQualifiers.isEmpty()) {
@@ -1377,7 +1374,6 @@ public class HBaseFsck extends Configured implements Closeable {
* 2.2 the correct colfamily list
* 2.3 the default properties for both {@link TableDescriptor} and
* {@link ColumnFamilyDescriptor}
- * n
*/
public void fixOrphanTables() throws IOException {
if (shouldFixTableOrphans() && !orphanTableDirs.isEmpty()) {
@@ -1557,7 +1553,7 @@ public class HBaseFsck extends Configured implements Closeable {
}
/**
- * Load the list of disabled tables in ZK into local set. nn
+ * Load the list of disabled tables in ZK into local set.
*/
private void loadTableStates() throws IOException {
tableStates = MetaTableAccessor.getTableStates(connection);
@@ -2325,7 +2321,7 @@ public class HBaseFsck extends Configured implements Closeable {
/**
* Checks tables integrity. Goes over all regions and scans the tables. Collects all the pieces
- * for each table and checks if there are missing, repeated or overlapping ones. n
+ * for each table and checks if there are missing, repeated or overlapping ones.
*/
SortedMap checkIntegrity() throws IOException {
tablesInfo = new TreeMap<>();
@@ -2575,7 +2571,7 @@ public class HBaseFsck extends Configured implements Closeable {
* Check values in regionInfo for hbase:meta Check if zero or more than one regions with
* hbase:meta are found. If there are inconsistencies (i.e. zero or more than one regions pretend
* to be holding the hbase:meta) try to fix that and report an error.
- * @throws IOException from HBaseFsckRepair functions nn
+ * @throws IOException from HBaseFsckRepair functions
*/
boolean checkMetaRegion() throws IOException, KeeperException, InterruptedException {
Map metaRegions = new HashMap<>();
@@ -2860,7 +2856,7 @@ public class HBaseFsck extends Configured implements Closeable {
/**
* Report error information, but do not increment the error count. Intended for cases where the
- * actual error would have been reported previously. n
+ * actual error would have been reported previously.
*/
@Override
public synchronized void report(String message) {
@@ -3516,7 +3512,7 @@ public class HBaseFsck extends Configured implements Closeable {
}
/**
- * Main program nn
+ * Main program
*/
public static void main(String[] args) throws Exception {
// create a fsck object
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
index 06b73c67a31..7095412cc9d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
@@ -76,7 +76,7 @@ public class HBaseFsckRepair {
/**
* Fix unassigned by creating/transition the unassigned ZK node for this region to OFFLINE state
* with a special flag to tell the master that this is a forced operation by HBCK. This assumes
- * that info is in META. nnnn
+ * that info is in META.
*/
public static void fixUnassigned(Admin admin, RegionInfo region)
throws IOException, KeeperException, InterruptedException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
index 25eab87a08a..d7b06383e1e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
@@ -73,7 +73,8 @@ public class JVMClusterUtil {
* Creates a {@link RegionServerThread}. Call 'start' on the returned thread to make it run.
* @param c Configuration to use.
* @param hrsc Class to create.
- * @param index Used distinguishing the object returned. n * @return Region server added.
+ * @param index Used distinguishing the object returned.
+ * @return Region server added.
*/
public static JVMClusterUtil.RegionServerThread createRegionServerThread(final Configuration c,
final Class extends HRegionServer> hrsc, final int index) throws IOException {
@@ -113,7 +114,8 @@ public class JVMClusterUtil {
* Creates a {@link MasterThread}. Call 'start' on the returned thread to make it run.
* @param c Configuration to use.
* @param hmc Class to create.
- * @param index Used distinguishing the object returned. n * @return Master added.
+ * @param index Used distinguishing the object returned.
+ * @return Master added.
*/
public static JVMClusterUtil.MasterThread createMasterThread(final Configuration c,
final Class extends HMaster> hmc, final int index) throws IOException {
@@ -148,7 +150,7 @@ public class JVMClusterUtil {
/**
* Start the cluster. Waits until there is a primary master initialized and returns its address.
- * nn * @return Address to use contacting primary master.
+ * @return Address to use contacting primary master.
*/
public static String startup(final List masters,
final List regionservers) throws IOException {
@@ -229,8 +231,7 @@ public class JVMClusterUtil {
}
/**
- * nn
- */
+ */
public static void shutdown(final List masters,
final List regionservers) {
LOG.debug("Shutting down HBase Cluster");
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
index 772b89bc135..564c46ad5bf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
@@ -88,7 +88,7 @@ public abstract class ModifyRegionUtils {
* @param rootDir Root directory for HBase instance
* @param tableDescriptor description of the table
* @param newRegions {@link RegionInfo} that describes the regions to create
- * @param task {@link RegionFillTask} custom code to populate region after creation n
+ * @param task {@link RegionFillTask} custom code to populate region after creation
*/
public static List createRegions(final Configuration conf, final Path rootDir,
final TableDescriptor tableDescriptor, final RegionInfo[] newRegions, final RegionFillTask task)
@@ -112,7 +112,7 @@ public abstract class ModifyRegionUtils {
* @param rootDir Root directory for HBase instance
* @param tableDescriptor description of the table
* @param newRegions {@link RegionInfo} that describes the regions to create
- * @param task {@link RegionFillTask} custom code to populate region after creation n
+ * @param task {@link RegionFillTask} custom code to populate region after creation
*/
public static List createRegions(final ThreadPoolExecutor exec,
final Configuration conf, final Path rootDir, final TableDescriptor tableDescriptor,
@@ -149,7 +149,7 @@ public abstract class ModifyRegionUtils {
* @param rootDir Root directory for HBase instance
* @param tableDescriptor description of the table
* @param newRegion {@link RegionInfo} that describes the region to create
- * @param task {@link RegionFillTask} custom code to populate region after creation n
+ * @param task {@link RegionFillTask} custom code to populate region after creation
*/
public static RegionInfo createRegion(final Configuration conf, final Path rootDir,
final TableDescriptor tableDescriptor, final RegionInfo newRegion, final RegionFillTask task)
@@ -176,7 +176,7 @@ public abstract class ModifyRegionUtils {
* Execute the task on the specified set of regions.
* @param exec Thread Pool Executor
* @param regions {@link RegionInfo} that describes the regions to edit
- * @param task {@link RegionFillTask} custom code to edit the region n
+ * @param task {@link RegionFillTask} custom code to edit the region
*/
public static void editRegions(final ThreadPoolExecutor exec,
final Collection regions, final RegionEditTask task) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java
index f3e731f5333..00655f42a75 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java
@@ -87,7 +87,7 @@ public class MunkresAssignment {
* Construct a new problem instance with the specified cost matrix. The cost matrix must be
* rectangular, though not necessarily square. If one dimension is greater than the other, some
* elements in the greater dimension will not be assigned. The input cost matrix will not be
- * modified. n
+ * modified.
*/
public MunkresAssignment(float[][] costMatrix) {
// The algorithm assumes that the number of columns is at least as great as
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
index b7285fb9f94..4de8ecc88c8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
@@ -200,8 +200,8 @@ public class RegionMover extends AbstractHBaseTool implements Closeable {
}
/**
- * Path of file where regions will be written to during unloading/read from during loading n
- * * @return RegionMoverBuilder object
+ * Path of file where regions will be written to during unloading/read from during loading
+ * @return RegionMoverBuilder object
*/
public RegionMoverBuilder filename(String filename) {
this.filename = filename;
@@ -246,7 +246,7 @@ public class RegionMover extends AbstractHBaseTool implements Closeable {
* effort mode,each region movement is tried once.This can be used during graceful shutdown as
* even if we have a stuck region,upon shutdown it'll be reassigned anyway.
*
- * n * @return RegionMoverBuilder object
+ * @return RegionMoverBuilder object
*/
public RegionMoverBuilder ack(boolean ack) {
this.ack = ack;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
index 2f56426f68b..c7e9166b54a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
@@ -145,15 +145,14 @@ public class RegionSplitter {
*/
public interface SplitAlgorithm {
/**
- * Split a pre-existing region into 2 regions. n * first row (inclusive) n * last row
- * (exclusive)
+ * Split a pre-existing region into 2 regions.
+ * @param start first row (inclusive)
+ * @param end last row (exclusive)
* @return the split row to use
*/
byte[] split(byte[] start, byte[] end);
/**
- * Split an entire table. n * number of regions to split the table into n * user input is
- * validated at this time. may throw a runtime exception in response to a parse failure
+ * Split an entire table.
+ * @param numRegions number of regions to split the table into. User input is validated at this
+ *          time; may throw a runtime exception in response to a parse failure.
* @return array of split keys for the initial regions of the table. The length of the returned
* array should be numRegions-1.
*/
@@ -187,7 +186,7 @@ public class RegionSplitter {
/**
* In HBase, the last row is represented by an empty byte array. Set this value to help the
- * split code understand how to evenly divide the first region. n * raw user input (may throw
+ * split code understand how to evenly divide the first region. Takes raw user input (may throw
* RuntimeException on parse failure)
*/
void setFirstRow(String userInput);
@@ -195,19 +194,19 @@ public class RegionSplitter {
/**
* In HBase, the last row is represented by an empty byte array. Set this value to help the
* split code understand how to evenly divide the last region. Note that this last row is
- * inclusive for all rows sharing the same prefix. n * raw user input (may throw
- * RuntimeException on parse failure)
+ * inclusive for all rows sharing the same prefix.
+ * @param userInput raw user input (may throw RuntimeException on parse failure)
*/
void setLastRow(String userInput);
/**
- * n * user or file input for row
+ * @param input user or file input for row
* @return byte array representation of this row for HBase
*/
byte[] strToRow(String input);
/**
- * n * byte array representing a row in HBase
+ * @param row byte array representing a row in HBase
* @return String to use for debug & file printing
*/
String rowToStr(byte[] row);
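For reference, a minimal sketch of driving a SplitAlgorithm programmatically, using the built-in HexStringSplit; per the interface contract above, split(4) returns three boundary keys:

```java
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.RegionSplitter;

// Pre-compute split keys for 4 regions with the hex-string key space strategy.
public class SplitExample {
  public static void main(String[] args) {
    RegionSplitter.SplitAlgorithm algo = new RegionSplitter.HexStringSplit();
    byte[][] splits = algo.split(4);          // numRegions - 1 = 3 split keys
    for (byte[] split : splits) {
      System.out.println(Bytes.toString(split));
    }
  }
}
```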
@@ -251,10 +250,9 @@ public class RegionSplitter {
*
* There are three SplitAlgorithms built into RegionSplitter, HexStringSplit, DecimalStringSplit,
* and UniformSplit. These are different strategies for choosing region boundaries. See their
- * source code for details. n * Usage: RegionSplitter <TABLE> <SPLITALGORITHM> <-c
+ * source code for details. Usage: RegionSplitter <TABLE> <SPLITALGORITHM> <-c
* <# regions> -f <family:family:...> | -r [-o <# outstanding splits>]> [-D
- * <conf.param=value>] n * HBase IO problem n * user requested exit n * problem parsing user
- * input
+ * <conf.param=value>]
+ * @throws IOException HBase IO problem
+ * @throws InterruptedException user requested exit
+ * @throws ParseException problem parsing user input
*/
@SuppressWarnings("static-access")
public static void main(String[] args) throws IOException, InterruptedException, ParseException {
@@ -375,8 +373,8 @@ public class RegionSplitter {
}
/**
- * Alternative getCurrentNrHRS which is no longer available. n * @return Rough count of
- * regionservers out on cluster.
+ * Alternative getCurrentNrHRS which is no longer available.
+ * @return Rough count of regionservers out on cluster.
* @throws IOException if a remote or network exception occurs
*/
private static int getRegionServerCount(final Connection connection) throws IOException {
@@ -712,7 +710,7 @@ public class RegionSplitter {
}
/**
- * nn * @return A Pair where first item is table dir and second is the split file.
+ * @return A Pair where first item is table dir and second is the split file.
* @throws IOException if a remote or network exception occurs
*/
private static Pair getTableDirAndSplitFile(final Configuration conf,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RollingStatCalculator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RollingStatCalculator.java
index 4eedfe083e1..f63208cd489 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RollingStatCalculator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RollingStatCalculator.java
@@ -41,7 +41,7 @@ public class RollingStatCalculator {
private long[] dataValues;
/**
- * Creates a RollingStatCalculator with given number of rolling periods. n
+ * Creates a RollingStatCalculator with given number of rolling periods.
*/
public RollingStatCalculator(int rollingPeriod) {
this.rollingPeriod = rollingPeriod;
@@ -53,7 +53,7 @@ public class RollingStatCalculator {
}
/**
- * Inserts given data value to array of data values to be considered for statistics calculation n
+ * Inserts given data value to array of data values to be considered for statistics calculation
*/
public void insertDataValue(long data) {
// if current number of data points already equals rolling period and rolling period is
@@ -71,7 +71,7 @@ public class RollingStatCalculator {
}
/**
- * Update the statistics after removing the given data value n
+ * Update the statistics after removing the given data value
*/
private void removeData(long data) {
currentSum = currentSum - (double) data;
@@ -91,9 +91,7 @@ public class RollingStatCalculator {
return Math.sqrt(variance);
}
- /**
- * n * @return an array of given size initialized with zeros
- */
+ /** Returns an array of given size initialized with zeros */
private long[] fillWithZeros(int size) {
long[] zeros = new long[size];
for (int i = 0; i < size; i++) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java
index 173c202e2d3..5f4ddb3821e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java
@@ -92,7 +92,8 @@ public class ZKDataMigrator {
* @param tableName table we're checking
* @return Null or
* {@link org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State}
- * found in znode. n * @deprecated Since 2.0.0. To be removed in hbase-3.0.0.
+ * found in znode.
+ * @deprecated Since 2.0.0. To be removed in hbase-3.0.0.
*/
@Deprecated
private static ZooKeeperProtos.DeprecatedTableState.State getTableState(final ZKWatcher zkw,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
index 479a2425c4b..9796669d5db 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
@@ -86,8 +86,8 @@ public class HFileCorruptionChecker {
}
/**
- * Checks a path to see if it is a valid hfile. n * full Path to an HFile n * This is a
- * connectivity related exception
+ * Checks a path to see if it is a valid hfile.
+ * @param p full Path to an HFile
+ * @throws IOException This is a connectivity related exception
*/
protected void checkHFile(Path p) throws IOException {
HFile.Reader r = null;
@@ -121,7 +121,7 @@ public class HFileCorruptionChecker {
/**
* Given a path, generates a new path to where we move a corrupted hfile (bad trailer, no
- * trailer). n * Path to a corrupt hfile (assumes that it is HBASE_DIR/ table /region/cf/file)
+ * trailer), given the path to a corrupt hfile (assumes that it is HBASE_DIR/ table /region/cf/file).
* @return path to where corrupted files are stored. This should be
* HBASE_DIR/.corrupt/table/region/cf/file.
*/
@@ -144,7 +144,7 @@ public class HFileCorruptionChecker {
}
/**
- * Check all files in a column family dir. n * column family directory n
+ * Check all files in a column family dir.
+ * @param cfDir column family directory
*/
protected void checkColFamDir(Path cfDir) throws IOException {
FileStatus[] statuses = null;
@@ -176,7 +176,7 @@ public class HFileCorruptionChecker {
}
/**
- * Check all files in a mob column family dir. n * mob column family directory n
+ * Check all files in a mob column family dir.
+ * @param cfDir mob column family directory
*/
protected void checkMobColFamDir(Path cfDir) throws IOException {
FileStatus[] statuses = null;
@@ -208,8 +208,8 @@ public class HFileCorruptionChecker {
}
/**
- * Checks a path to see if it is a valid mob file. n * full Path to a mob file. n * This is a
- * connectivity related exception
+ * Checks a path to see if it is a valid mob file.
+ * @param p full Path to a mob file
+ * @throws IOException This is a connectivity related exception
*/
protected void checkMobFile(Path p) throws IOException {
HFile.Reader r = null;
@@ -243,7 +243,7 @@ public class HFileCorruptionChecker {
/**
* Checks all the mob files of a table.
- * @param regionDir The mob region directory n
+ * @param regionDir The mob region directory
*/
private void checkMobRegionDir(Path regionDir) throws IOException {
if (!fs.exists(regionDir)) {
@@ -277,7 +277,7 @@ public class HFileCorruptionChecker {
}
/**
- * Check all column families in a region dir. n * region directory n
+ * Check all column families in a region dir.
+ * @param regionDir region directory
*/
protected void checkRegionDir(Path regionDir) throws IOException {
FileStatus[] statuses = null;
@@ -309,7 +309,7 @@ public class HFileCorruptionChecker {
}
/**
- * Check all the regiondirs in the specified tableDir n * path to a table n
+ * Check all the regiondirs in the specified tableDir
+ * @param tableDir path to a table
*/
void checkTableDir(Path tableDir) throws IOException {
List rds =
@@ -488,7 +488,7 @@ public class HFileCorruptionChecker {
}
/**
- * Print a human readable summary of hfile quarantining operations. n
+ * Print a human readable summary of hfile quarantining operations.
*/
public void report(HbckErrorReporter out) {
out.print("Checked " + hfilesChecked.get() + " hfile for corruption");
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java
index 21accbcf99f..6bd88e2d52c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java
@@ -67,8 +67,8 @@ public interface WAL extends Closeable, WALFileLengthProvider {
* Roll the log writer. That is, start writing log messages to a new file.
*
* The implementation is synchronized in order to make sure there's one rollWriter running at any
- * given time. n * If true, force creation of a new writer even if no entries have been written to
- * the current writer
+ * given time. Passing true forces creation of a new writer even if no entries have been written
+ * to the current writer.
* @return If lots of logs, flush the stores of returned regions so next time through we can clean
* logs. Returns null if nothing to flush. Names are actual region names as returned by
* {@link RegionInfo#getEncodedName()}
@@ -269,14 +269,14 @@ public interface WAL extends Closeable, WALFileLengthProvider {
}
/**
- * Gets the edit n
+ * Gets the edit
*/
public WALEdit getEdit() {
return edit;
}
/**
- * Gets the key n
+ * Gets the key
*/
public WALKeyImpl getKey() {
return key;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java
index 146013ce1a9..cab96fe0dd1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java
@@ -250,7 +250,7 @@ public class WALKeyImpl implements WALKey {
* HRegionInfo#getEncodedNameAsBytes() .
* @param tablename the tablename
* @param now Time at which this edit was written.
- * @param clusterIds the clusters that have consumed the change(used in Replication) nn
+ * @param clusterIds the clusters that have consumed the change (used in Replication)
* @param mvcc mvcc control used to generate sequence numbers and
* control read/write points
*/
@@ -265,8 +265,8 @@ public class WALKeyImpl implements WALKey {
* Create the log key for writing to somewhere. We maintain the tablename mainly for debugging
* purposes. A regionName is always a sub-table object.
* @param encodedRegionName Encoded name of the region as returned by
- * HRegionInfo#getEncodedNameAsBytes() . n * @param now Time
- * at which this edit was written.
+ * HRegionInfo#getEncodedNameAsBytes() .
+ * @param now Time at which this edit was written.
* @param clusterIds the clusters that have consumed the change(used in Replication)
* @param nonceGroup the nonceGroup
* @param nonce the nonce
@@ -285,7 +285,7 @@ public class WALKeyImpl implements WALKey {
* Create the log key for writing to somewhere. We maintain the tablename mainly for debugging
* purposes. A regionName is always a sub-table object.
* @param encodedRegionName Encoded name of the region as returned by
- * HRegionInfo#getEncodedNameAsBytes() . nnnn
+ * HRegionInfo#getEncodedNameAsBytes() .
*/
// TODO: Fix being able to pass in sequenceid.
public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, long logSeqNum,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
index 21c18964de7..15762a2c2e8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
@@ -142,8 +142,8 @@ public class WALPrettyPrinter {
}
/**
- * sets the region by which output will be filtered n * when nonnegative, serves as a filter; only
- * log entries with this sequence id will be printed
+ * sets the sequence id by which output will be filtered
+ * @param sequence when nonnegative, serves as a filter; only log entries with this sequence id
+ *          will be printed
*/
public void setSequenceFilter(long sequence) {
this.sequence = sequence;
@@ -158,23 +158,23 @@ public class WALPrettyPrinter {
}
/**
- * sets the region by which output will be filtered n * when not null, serves as a filter; only
- * log entries from this region will be printed
+ * sets the region by which output will be filtered
+ * @param region when not null, serves as a filter; only log entries from this region will be
+ *          printed
*/
public void setRegionFilter(String region) {
this.region = region;
}
/**
- * sets the row key by which output will be filtered n * when not null, serves as a filter; only
- * log entries from this row will be printed
+ * sets the row key by which output will be filtered
+ * @param row when not null, serves as a filter; only log entries from this row will be printed
*/
public void setRowFilter(String row) {
this.row = row;
}
/**
- * sets the rowPrefix key prefix by which output will be filtered n * when not null, serves as a
+ * sets the rowPrefix key prefix by which output will be filtered. When not null, serves as a
* filter; only log entries with rows having this prefix will be printed
*/
public void setRowPrefixFilter(String rowPrefix) {
@@ -189,8 +189,8 @@ public class WALPrettyPrinter {
}
/**
- * sets the position to start seeking the WAL file n * initial position to start seeking the given
- * WAL file
+ * sets the position to start seeking the WAL file
+ * @param position initial position to start seeking the given WAL file
*/
public void setPosition(long position) {
this.position = position;
@@ -226,9 +226,8 @@ public class WALPrettyPrinter {
/**
* reads a log file and outputs its contents, one transaction at a time, as specified by the
- * currently configured options n * the HBase configuration relevant to this log file n * the path
- * of the log file to be read n * may be unable to access the configured filesystem or requested
- * file.
+ * currently configured options
+ * @param conf the HBase configuration relevant to this log file
+ * @param p the path of the log file to be read
+ * @throws IOException may be unable to access the configured filesystem or requested file
*/
public void processFile(final Configuration conf, final Path p) throws IOException {
FileSystem fs = p.getFileSystem(conf);
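A hedged sketch of wiring the filter setters together before processFile; the region name, row key, and WAL path below are placeholders, and the snippet assumes the default WALPrettyPrinter constructor:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.wal.WALPrettyPrinter;

// Dump only the WAL entries that match a region and a row.
public class WalDumpSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    WALPrettyPrinter printer = new WALPrettyPrinter();
    printer.setRegionFilter("1588230740");    // only entries from this region (placeholder)
    printer.setRowFilter("row-0001");         // only entries touching this row (placeholder)
    printer.processFile(conf, new Path("/hbase/WALs/host,16020,1/wal.12345"));
  }
}
```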
@@ -408,8 +407,7 @@ public class WALPrettyPrinter {
/**
* Pass one or more log file names and formatting options and it will dump out a text version of
- * the contents on stdout . n * Command line arguments n * Thrown upon file system
- * errors etc.
+ * the contents on stdout.
+ * @param args Command line arguments
+ * @throws IOException Thrown upon file system errors etc.
*/
public static void run(String[] args) throws IOException {
// create options
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java
index 1598e6fbbb3..4c1c1a5be9b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java
@@ -541,7 +541,8 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
* Start a minidfscluster. This is useful if you want to run datanode on distinct hosts for things
* like HDFS block location verification. If you start MiniDFSCluster without host names, all
* instances of the datanodes will have the same host name.
- * @param hosts hostnames DNs to run on. n * @see #shutdownMiniDFSCluster()
+ * @param hosts hostnames DNs to run on.
+ * @see #shutdownMiniDFSCluster()
* @return The mini dfs cluster created.
*/
public MiniDFSCluster startMiniDFSCluster(final String[] hosts) throws Exception {
@@ -555,7 +556,8 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
/**
* Start a minidfscluster. Can only create one.
* @param servers How many DNs to start.
- * @param hosts hostnames DNs to run on. n * @see #shutdownMiniDFSCluster()
+ * @param hosts hostnames DNs to run on.
+ * @see #shutdownMiniDFSCluster()
* @return The mini dfs cluster created.
*/
public MiniDFSCluster startMiniDFSCluster(int servers, final String[] hosts) throws Exception {
@@ -1100,7 +1102,7 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
* Returns the path to the default root dir the minicluster uses. If create is true,
* a new root directory path is fetched irrespective of whether it has been fetched before or not.
* If false, previous path is used. Note: this does not cause the root dir to be created.
- * @return Fully qualified path for the default hbase root dir n
+ * @return Fully qualified path for the default hbase root dir
*/
public Path getDefaultRootDirPath(boolean create) throws IOException {
if (!create) {
@@ -1113,7 +1115,7 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
/**
* Same as {{@link HBaseTestingUtil#getDefaultRootDirPath(boolean create)} except that
* create flag is false. Note: this does not cause the root dir to be created.
- * @return Fully qualified path for the default hbase root dir n
+ * @return Fully qualified path for the default hbase root dir
*/
public Path getDefaultRootDirPath() throws IOException {
return getDefaultRootDirPath(false);
@@ -1126,7 +1128,7 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
* @param create This flag decides whether to get a new root or data directory path or not, if it
* has been fetched already. Note : Directory will be made irrespective of whether
* path has been fetched or not. If directory already exists, it will be overwritten
- * @return Fully qualified path to hbase root dir n
+ * @return Fully qualified path to hbase root dir
*/
public Path createRootDir(boolean create) throws IOException {
FileSystem fs = FileSystem.get(this.conf);
@@ -1140,7 +1142,7 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
/**
* Same as {@link HBaseTestingUtil#createRootDir(boolean create)} except that create
* flag is false.
- * @return Fully qualified path to hbase root dir n
+ * @return Fully qualified path to hbase root dir
*/
public Path createRootDir() throws IOException {
return createRootDir(false);
@@ -1150,7 +1152,7 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
* Creates a hbase walDir in the user's home directory. Normally you won't make use of this
* method. Root hbaseWALDir is created for you as part of mini cluster startup. You'd only use
* this method if you were doing manual operation.
- * @return Fully qualified path to hbase root dir n
+ * @return Fully qualified path to hbase root dir
*/
public Path createWALRootDir() throws IOException {
FileSystem fs = FileSystem.get(this.conf);
@@ -1171,42 +1173,44 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
}
/**
- * Flushes all caches in the mini hbase cluster n
+ * Flushes all caches in the mini hbase cluster
*/
public void flush() throws IOException {
getMiniHBaseCluster().flushcache();
}
/**
- * Flushes all caches in the mini hbase cluster n
+ * Flushes all caches of the given table in the mini hbase cluster
*/
public void flush(TableName tableName) throws IOException {
getMiniHBaseCluster().flushcache(tableName);
}
/**
- * Compact all regions in the mini hbase cluster n
+ * Compact all regions in the mini hbase cluster
*/
public void compact(boolean major) throws IOException {
getMiniHBaseCluster().compact(major);
}
/**
- * Compact all of a table's reagion in the mini hbase cluster n
+ * Compact all of a table's regions in the mini hbase cluster
*/
public void compact(TableName tableName, boolean major) throws IOException {
getMiniHBaseCluster().compact(tableName, major);
}
/**
- * Create a table. nn * @return A Table instance for the created table. n
+ * Create a table.
+ * @return A Table instance for the created table.
*/
public Table createTable(TableName tableName, String family) throws IOException {
return createTable(tableName, new String[] { family });
}
/**
- * Create a table. nn * @return A Table instance for the created table. n
+ * Create a table.
+ * @return A Table instance for the created table.
*/
public Table createTable(TableName tableName, String[] families) throws IOException {
List fams = new ArrayList<>(families.length);
@@ -1217,14 +1221,16 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
}
/**
- * Create a table. nn * @return A Table instance for the created table. n
+ * Create a table.
+ * @return A Table instance for the created table.
*/
public Table createTable(TableName tableName, byte[] family) throws IOException {
return createTable(tableName, new byte[][] { family });
}
/**
- * Create a table with multiple regions. nnn * @return A Table instance for the created table. n
+ * Create a table with multiple regions.
+ * @return A Table instance for the created table.
*/
public Table createMultiRegionTable(TableName tableName, byte[] family, int numRegions)
throws IOException {
@@ -1237,22 +1243,25 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
}
/**
- * Create a table. nn * @return A Table instance for the created table. n
+ * Create a table.
+ * @return A Table instance for the created table.
*/
public Table createTable(TableName tableName, byte[][] families) throws IOException {
return createTable(tableName, families, (byte[][]) null);
}
/**
- * Create a table with multiple regions. nn * @return A Table instance for the created table. n
+ * Create a table with multiple regions.
+ * @return A Table instance for the created table.
*/
public Table createMultiRegionTable(TableName tableName, byte[][] families) throws IOException {
return createTable(tableName, families, KEYS_FOR_HBA_CREATE_TABLE);
}
/**
- * Create a table with multiple regions. n * @param replicaCount replica count. n * @return A
- * Table instance for the created table. n
+ * Create a table with multiple regions.
+ * @param replicaCount replica count.
+ * @return A Table instance for the created table.
*/
public Table createMultiRegionTable(TableName tableName, int replicaCount, byte[][] families)
throws IOException {
@@ -1260,7 +1269,8 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
}
/**
- * Create a table. nnn * @return A Table instance for the created table. n
+ * Create a table.
+ * @return A Table instance for the created table.
*/
public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys)
throws IOException {
@@ -1359,7 +1369,7 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
* Create a table.
* @param htd table descriptor
* @param splitRows array of split keys
- * @return A Table instance for the created table. n
+ * @return A Table instance for the created table.
*/
public Table createTable(TableDescriptor htd, byte[][] splitRows) throws IOException {
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
@@ -1693,7 +1703,7 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
* @param conf configuration
* @param desc table descriptor
* @param wal wal for this region.
- * @return created hregion n
+ * @return created hregion
*/
public HRegion createLocalHRegion(RegionInfo info, Configuration conf, TableDescriptor desc,
WAL wal) throws IOException {
@@ -1701,8 +1711,8 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
}
/**
- * nnnnn * @return A region on which you must call
- * {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} when done. n
+ * @return A region on which you must call {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)}
+ * when done.
*/
public HRegion createLocalHRegion(TableName tableName, byte[] startKey, byte[] stopKey,
Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... families)
@@ -1743,7 +1753,7 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
* Provide an existing table name to truncate. Scans the table and issues a delete for each row
* read.
* @param tableName existing table
- * @return HTable to that new table n
+ * @return HTable to that new table
*/
public Table deleteTableData(TableName tableName) throws IOException {
Table table = getConnection().getTable(tableName);
@@ -1790,7 +1800,7 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
* Load table with rows from 'aaa' to 'zzz'.
* @param t Table
* @param f Family
- * @return Count of rows loaded. n
+ * @return Count of rows loaded.
*/
public int loadTable(final Table t, final byte[] f) throws IOException {
return loadTable(t, new byte[][] { f });
@@ -1800,7 +1810,7 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
* Load table with rows from 'aaa' to 'zzz'.
* @param t Table
* @param f Family
- * @return Count of rows loaded. n
+ * @return Count of rows loaded.
*/
public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws IOException {
return loadTable(t, new byte[][] { f }, null, writeToWAL);
@@ -1810,7 +1820,7 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
* Load table of multiple column families with rows from 'aaa' to 'zzz'.
* @param t Table
* @param f Array of Families to load
- * @return Count of rows loaded. n
+ * @return Count of rows loaded.
*/
public int loadTable(final Table t, final byte[][] f) throws IOException {
return loadTable(t, f, null);
@@ -1821,7 +1831,7 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
* @param t Table
* @param f Array of Families to load
* @param value the values of the cells. If null is passed, the row key is used as value
- * @return Count of rows loaded. n
+ * @return Count of rows loaded.
*/
public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOException {
return loadTable(t, f, value, true);
@@ -1832,7 +1842,7 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
* @param t Table
* @param f Array of Families to load
* @param value the values of the cells. If null is passed, the row key is used as value
- * @return Count of rows loaded. n
+ * @return Count of rows loaded.
*/
public int loadTable(final Table t, final byte[][] f, byte[] value, boolean writeToWAL)
throws IOException {
@@ -1919,7 +1929,7 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
* @param r Region
* @param f Family
* @param flush flush the cache if true
- * @return Count of rows loaded. n
+ * @return Count of rows loaded.
*/
public int loadRegion(final HRegion r, final byte[] f, final boolean flush) throws IOException {
byte[] k = new byte[3];
@@ -2678,7 +2688,7 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
/**
* Closes the region containing the given row.
* @param row The row to find the containing region.
- * @param table The table to find the region. n
+ * @param table The table to find the region.
*/
public void unassignRegionByRow(byte[] row, RegionLocator table) throws IOException {
HRegionLocation hrl = table.getRegionLocation(row);
@@ -3649,7 +3659,7 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
}
/**
- * Wait until no regions in transition. (time limit 15min) n
+ * Wait until no regions in transition. (time limit 15min)
*/
public void waitUntilNoRegionsInTransition() throws IOException {
waitUntilNoRegionsInTransition(15 * 60000);
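As a rough sketch of how the helpers touched above typically fit together in a test (the table name, family, and region count are illustrative; the mini cluster lifecycle is normally handled in @BeforeClass/@AfterClass):

  // Assumes imports of HBaseTestingUtil, TableName, Table, and Bytes from org.apache.hadoop.hbase.
  @Test
  public void exampleUsage() throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster();
    try {
      TableName tn = TableName.valueOf("demoTable");           // illustrative name
      byte[] fam = Bytes.toBytes("f");
      Table table = util.createMultiRegionTable(tn, fam, 8);   // pre-split into 8 regions
      int rows = util.loadTable(table, fam);                   // rows 'aaa'..'zzz'; returns the count
      util.waitUntilNoRegionsInTransition();                   // default 15 minute limit
      util.deleteTableData(tn);                                // scan-and-delete truncate
    } finally {
      util.shutdownMiniCluster();
    }
  }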
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
index 4d24b743012..e60d23d1206 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
@@ -104,7 +104,7 @@ public class HFilePerformanceEvaluation {
/**
* Add any supported codec or cipher to test the HFile read/write performance. Specify "none" to
- * disable codec or cipher or both. n
+ * disable codec or cipher or both.
*/
private void runBenchmarks() throws Exception {
final Configuration conf = new Configuration();
@@ -170,9 +170,9 @@ public class HFilePerformanceEvaluation {
}
/**
- * Write a test HFile with the given codec & cipher nnn * @param codec "none", "lzo", "gz",
- * "snappy"
- * @param cipher "none", "aes" n
+ * Write a test HFile with the given codec & cipher
+ * @param codec "none", "lzo", "gz", "snappy"
+ * @param cipher "none", "aes"
*/
private void runWriteBenchmark(Configuration conf, FileSystem fs, Path mf, String codec,
String cipher) throws Exception {
@@ -186,7 +186,8 @@ public class HFilePerformanceEvaluation {
}
/**
- * Run all the read benchmarks for the test HFile nnn * @param codec "none", "lzo", "gz", "snappy"
+ * Run all the read benchmarks for the test HFile
+ * @param codec "none", "lzo", "gz", "snappy"
* @param cipher "none", "aes"
*/
private void runReadBenchmark(final Configuration conf, final FileSystem fs, final Path mf,
@@ -303,7 +304,7 @@ public class HFilePerformanceEvaluation {
/**
* Run benchmark
- * @return elapsed time. n
+ * @return elapsed time.
*/
long run() throws Exception {
long elapsedTime;
@@ -520,8 +521,7 @@ public class HFilePerformanceEvaluation {
}
/**
- * nnn
- */
+ * */
public static void main(String[] args) throws Exception {
new HFilePerformanceEvaluation().runBenchmarks();
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java
index 50d67158852..ba35d318c8c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java
@@ -34,7 +34,7 @@ public class MetaMockingUtil {
* Returns a Result object constructed from the given region information simulating a catalog
* table result.
* @param region the HRegionInfo object or null
- * @return A mocked up Result that fakes a Get on a row in the hbase:meta table. n
+ * @return A mocked up Result that fakes a Get on a row in the hbase:meta table.
*/
public static Result getMetaTableRowResult(final RegionInfo region) throws IOException {
return getMetaTableRowResult(region, null, null, null);
@@ -45,7 +45,7 @@ public class MetaMockingUtil {
* table result.
* @param region the HRegionInfo object or null
* @param sn to use making startcode and server hostname:port in meta or null
- * @return A mocked up Result that fakes a Get on a row in the hbase:meta table. n
+ * @return A mocked up Result that fakes a Get on a row in the hbase:meta table.
*/
public static Result getMetaTableRowResult(final RegionInfo region, final ServerName sn)
throws IOException {
@@ -59,7 +59,7 @@ public class MetaMockingUtil {
* @param sn to use making startcode and server hostname:port in meta or null
* @param splita daughter region or null
* @param splitb daughter region or null
- * @return A mocked up Result that fakes a Get on a row in the hbase:meta table. n
+ * @return A mocked up Result that fakes a Get on a row in the hbase:meta table.
*/
public static Result getMetaTableRowResult(RegionInfo region, final ServerName sn,
RegionInfo splita, RegionInfo splitb) throws IOException {
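A hedged sketch of how such a mocked hbase:meta Result might be built in a unit test; the table name, host, port, and startcode are illustrative:

  RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.valueOf("demo")).build();
  ServerName sn = ServerName.valueOf("host.example.org", 16020, 1234L);
  Result metaRow = MetaMockingUtil.getMetaTableRowResult(ri, sn);   // fakes a Get on a hbase:meta row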
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
index 03cd84a3f97..07ad497e5cc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
@@ -70,7 +70,7 @@ public class TestGlobalMemStoreSize {
/**
* Test the global mem store size in the region server is equal to sum of each region's mem store
- * size n
+ * size
*/
@Test
public void testGlobalMemStore() throws Exception {
@@ -149,7 +149,7 @@ public class TestGlobalMemStoreSize {
}
/**
- * Flush and log stats on flush nnn
+ * Flush and log stats on flush
*/
private void flush(final HRegion r, final HRegionServer server) throws IOException {
LOG.info("Flush " + r.toString() + " on " + server.getServerName() + ", " + r.flush(true)
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
index b5f4597e47b..1d50bfa37ba 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
@@ -136,7 +136,7 @@ public class TestPartialResultsFromClientSide {
/**
* Ensure that the expected key values appear in a result returned from a scanner that is
- * combining partial results into complete results n
+ * combining partial results into complete results
*/
@Test
public void testExpectedValuesOfPartialResults() throws Exception {
@@ -170,7 +170,7 @@ public class TestPartialResultsFromClientSide {
}
/**
- * Ensure that we only see Results marked as partial when the allowPartial flag is set n
+ * Ensure that we only see Results marked as partial when the allowPartial flag is set
*/
@Test
public void testAllowPartialResults() throws Exception {
@@ -202,7 +202,7 @@ public class TestPartialResultsFromClientSide {
/**
* Ensure that the results returned from a scanner that retrieves all results in a single RPC call
* matches the results that are returned from a scanner that must incrementally combine partial
- * results into complete results. A variety of scan configurations can be tested n
+ * results into complete results. A variety of scan configurations can be tested
*/
@Test
public void testEquivalenceOfScanResults() throws Exception {
@@ -239,7 +239,7 @@ public class TestPartialResultsFromClientSide {
}
/**
- * Order of cells in partial results matches the ordering of cells from complete results n
+ * Order of cells in partial results matches the ordering of cells from complete results
*/
@Test
public void testOrderingOfCellsInPartialResults() throws Exception {
@@ -311,7 +311,7 @@ public class TestPartialResultsFromClientSide {
/**
* Setting the max result size allows us to control how many cells we expect to see on each call
- * to next on the scanner. Test a variety of different sizes for correctness n
+ * to next on the scanner. Test a variety of different sizes for correctness
*/
@Test
public void testExpectedNumberOfCellsPerPartialResult() throws Exception {
@@ -363,7 +363,7 @@ public class TestPartialResultsFromClientSide {
/**
* @return The approximate heap size of a cell in the test table. All cells should have
* approximately the same heap size, so the value is cached to avoid repeating the
- * calculation n
+ * calculation
*/
private long getCellHeapSize() throws Exception {
if (CELL_HEAP_SIZE == -1) {
@@ -391,8 +391,8 @@ public class TestPartialResultsFromClientSide {
}
/**
- * n * @return the result size that should be used in {@link Scan#setMaxResultSize(long)} if you
- * want the server to return exactly numberOfCells cells n
+ * @return the result size that should be used in {@link Scan#setMaxResultSize(long)} if you want
+ * the server to return exactly numberOfCells cells
*/
private long getResultSizeForNumberOfCells(int numberOfCells) throws Exception {
return getCellHeapSize() * numberOfCells;
@@ -440,7 +440,7 @@ public class TestPartialResultsFromClientSide {
}
/**
- * Test the method {@link Result#createCompleteResult(Iterable)} n
+ * Test the method {@link Result#createCompleteResult(Iterable)}
*/
@Test
public void testPartialResultsReassembly() throws Exception {
@@ -541,7 +541,7 @@ public class TestPartialResultsFromClientSide {
* Examine the interaction between the maxResultSize and caching. If the caching limit is reached
* before the maxResultSize limit, we should not see partial results. On the other hand, if the
* maxResultSize limit is reached before the caching limit, it is likely that partial results will
- * be seen. n
+ * be seen.
*/
@Test
public void testPartialResultsAndCaching() throws Exception {
@@ -631,7 +631,7 @@ public class TestPartialResultsFromClientSide {
}
/**
- * Verifies that result contains all the key values within expKvList. Fails the test otherwise nnn
+ * Verifies that result contains all the key values within expKvList. Fails the test otherwise
*/
static void verifyResult(Result result, List expKvList, String msg) {
if (LOG.isInfoEnabled()) {
@@ -657,7 +657,7 @@ public class TestPartialResultsFromClientSide {
}
/**
- * Compares two results and fails the test if the results are different nnn
+ * Compares two results and fails the test if the results are different
*/
static void compareResults(Result r1, Result r2, final String message) {
if (LOG.isInfoEnabled()) {
@@ -766,7 +766,7 @@ public class TestPartialResultsFromClientSide {
* Test partial Result re-assembly in the presence of different filters. The Results from the
* partial scanner should match the Results returned from a scanner that receives all of the
* results in one RPC to the server. The partial scanner is tested with a variety of different
- * result sizes (all of which are less than the size necessary to fetch an entire row) n
+ * result sizes (all of which are less than the size necessary to fetch an entire row)
*/
@Test
public void testPartialResultsWithColumnFilter() throws Exception {
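On the client side, the behaviour these tests exercise looks roughly like the sketch below; the size cap is illustrative and a Table named table is assumed to be in scope:

  Scan scan = new Scan()
    .setAllowPartialResults(true)      // permit Results carrying only part of a row
    .setMaxResultSize(2 * 1024);       // small cap so wide rows are split across RPCs
  try (ResultScanner scanner = table.getScanner(scan)) {
    for (Result r : scanner) {
      if (r.mayHaveMoreCellsInRow()) {
        // more cells of this row will arrive in subsequent Results
      }
    }
  }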
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
index 68314ef5a9f..99a9e47b5ee 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
@@ -105,7 +105,7 @@ public class TestRegionRebalancing {
/**
* For HBASE-71. Try a few different configurations of starting and stopping region servers to see
- * if the assignment or regions is pretty balanced. nn
+ * if the assignment of regions is pretty balanced.
*/
@Test
public void testRebalanceOnRegionServerNumberChange() throws IOException, InterruptedException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java
index 4d9fd6fb48d..98a3c01a8e4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java
@@ -149,7 +149,7 @@ public class TestSerialization {
}
/**
- * Test RegionInfo serialization n
+ * Test RegionInfo serialization
*/
@Test
public void testRegionInfo() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java
index 801841fa708..60052e1718e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java
@@ -50,7 +50,7 @@ public class TimestampTestBase {
/*
* Run test that delete works according to description in <a
- * href="https://issues.apache.org/jira/browse/HADOOP-1784">hadoop-1784</a>. nnn
+ * href="https://issues.apache.org/jira/browse/HADOOP-1784">hadoop-1784</a>.
*/
public static void doTestDelete(final Table table, FlushCache flusher) throws IOException {
// Add values at various timestamps (Values are timestampes as bytes).
@@ -112,7 +112,7 @@ public class TimestampTestBase {
/*
* Assert that returned versions match passed in timestamps and that results are returned in the
* right order. Assert that values when converted to longs match the corresponding passed
- * timestamp. nnn
+ * timestamp.
*/
public static void assertVersions(final Table incommon, final long[] tss) throws IOException {
// Assert that 'latest' is what we expect.
@@ -158,7 +158,7 @@ public class TimestampTestBase {
}
/*
- * Run test scanning different timestamps. nnn
+ * Run test scanning different timestamps.
*/
public static void doTestTimestampScanning(final Table incommon, final FlushCache flusher)
throws IOException {
@@ -178,7 +178,8 @@ public class TimestampTestBase {
}
/*
- * Assert that the scan returns only values < timestamp. nn * @return Count of items scanned. n
+ * Assert that the scan returns only values < timestamp.
+ * @return Count of items scanned.
*/
public static int assertScanContentTimestamp(final Table in, final long ts) throws IOException {
Scan scan = new Scan().withStartRow(HConstants.EMPTY_START_ROW);
@@ -217,7 +218,7 @@ public class TimestampTestBase {
}
/*
- * Put values. nnnn
+ * Put values.
*/
public static void put(final Table loader, final byte[] bytes, final long ts) throws IOException {
Put put = new Put(ROW, ts);
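The pattern behind these helpers looks roughly like the following sketch; ROW, FAMILY, QUALIFIER, the timestamp, and the Table are illustrative assumptions:

  long ts = 1000L;
  Put put = new Put(ROW, ts);                          // cell written at an explicit timestamp
  put.addColumn(FAMILY, QUALIFIER, Bytes.toBytes(ts));
  table.put(put);

  Scan scan = new Scan().withStartRow(HConstants.EMPTY_START_ROW);
  scan.setTimeRange(0, ts);                            // half-open range: only cells older than ts
  try (ResultScanner scanner = table.getScanner(scan)) {
    for (Result r : scanner) {
      // count or assert on the returned versions
    }
  }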
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideBase.java
index 56cc9db5648..75e6738c126 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideBase.java
@@ -211,14 +211,14 @@ class FromClientSideBase {
}
/*
- * n * @return Scan with RowFilter that does LESS than passed key.
+ * @return Scan with RowFilter that does LESS than passed key.
*/
protected Scan createScanWithRowFilter(final byte[] key) {
return createScanWithRowFilter(key, null, CompareOperator.LESS);
}
/*
- * nnn * @return Scan with RowFilter that does CompareOp op on passed key.
+ * @return Scan with RowFilter that does CompareOp op on passed key.
*/
protected Scan createScanWithRowFilter(final byte[] key, final byte[] startRow,
CompareOperator op) {
@@ -251,8 +251,8 @@ class FromClientSideBase {
/*
* Wait on table split. May return because we waited long enough on the split and it didn't
- * happen. Caller should check. n * @return Map of table regions; caller needs to check table
- * actually split.
+ * happen. Caller should check.
+ * @return Map of table regions; caller needs to check table actually split.
*/
private List waitOnSplit(final Table t) throws IOException {
try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(t.getName())) {
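A hedged sketch of the kind of Scan the row-filter helper above builds; the key value is illustrative:

  byte[] key = Bytes.toBytes("row-050");
  Scan scan = new Scan();
  scan.setFilter(new RowFilter(CompareOperator.LESS, new BinaryComparator(key)));
  // Only rows whose key compares LESS than 'key' pass the filter.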
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
index 49abb3035a3..42581a821e3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
@@ -177,7 +177,7 @@ public class TestAdmin2 extends TestAdminBase {
}
/**
- * Test read only tables n
+ * Test read only tables
*/
@Test
public void testReadOnlyTable() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
index 4cdc2055768..ce63293fdd0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
@@ -963,7 +963,7 @@ public class TestFromClientSide3 {
* to assure cells in memstore are stamped with seqid/mvcc. If cells without mvcc(A.K.A mvcc=0)
* are put into memstore, then a scanner with a smaller readpoint can see these data, which
* disobey the multi version concurrency control rules. This test case is to reproduce this
- * scenario. n
+ * scenario.
*/
@Test
public void testMVCCUsingMVCCPreAssign() throws IOException, InterruptedException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java
index db4daeaaa1e..1b3a66ff171 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java
@@ -175,7 +175,7 @@ public class TestFromClientSideScanExcpetion {
/**
* Tests the case where a Scan can throw an IOException in the middle of the seek / reseek leaving
* the server side RegionScanner to be in dirty state. The client has to ensure that the
- * ClientScanner does not get an exception and also sees all the data. nn
+ * ClientScanner does not get an exception and also sees all the data.
*/
@Test
public void testClientScannerIsResetWhenScanThrowsIOException()
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java
index c6fa7753094..c7449850808 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java
@@ -308,7 +308,7 @@ public class TestMetaCache {
/**
* Throw some exceptions. Mostly throw exceptions which do not clear meta cache. Periodically
- * throw NotSevingRegionException which clears the meta cache. n
+ * throw NotServingRegionException which clears the meta cache.
*/
private void throwSomeExceptions(FakeRSRpcServices rpcServices,
HBaseProtos.RegionSpecifier regionSpec) throws ServiceException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java
index b1d6272dd66..c5842629e60 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java
@@ -456,7 +456,6 @@ public class TestResult {
/**
* Microbenchmark that compares {@link Result#getValue} and {@link Result#loadValue} performance.
- * n
*/
public void doReadBenchmark() throws Exception {
@@ -518,7 +517,7 @@ public class TestResult {
}
/**
- * Calls non-functional test methods. n
+ * Calls non-functional test methods.
*/
public static void main(String[] args) {
TestResult testResult = new TestResult();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java
index d5328cf8ba0..96d43e56a59 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java
@@ -97,7 +97,7 @@ public class TestScannerTimeout {
/**
* Test that scanner can continue even if the region server it was reading from failed. Before
- * 2772, it reused the same scanner id. n
+ * 2772, it reused the same scanner id.
*/
@Test
public void test2772() throws Exception {
@@ -127,7 +127,7 @@ public class TestScannerTimeout {
/**
* Test that scanner won't miss any rows if the region server it was reading from failed. Before
- * 3686, it would skip rows in the scan. n
+ * 3686, it would skip rows in the scan.
*/
@Test
public void test3686a() throws Exception {
@@ -170,7 +170,7 @@ public class TestScannerTimeout {
/**
* Make sure that no rows are lost if the scanner timeout is longer on the client than the server,
- * and the scan times out on the server but not the client. n
+ * and the scan times out on the server but not the client.
*/
@Test
public void test3686b() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java
index 385907ea9b9..846333e5370 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java
@@ -154,7 +154,7 @@ public class TestSizeFailures {
}
/**
- * Count the number of rows and the number of entries from a scanner n * The Scanner
+ * Count the number of rows and the number of entries from a scanner.
* @return An entry where the first item is rows observed and the second is entries observed.
*/
private Entry sumTable(ResultScanner scanner) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
index b612068fabf..8abb4d754a7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
@@ -168,7 +168,6 @@ public class TestSnapshotFromClient {
/**
* Test HBaseAdmin#deleteSnapshots(String) which deletes snapshots whose names match the parameter
- * n
*/
@Test
public void testSnapshotDeletionWithRegex() throws Exception {
@@ -204,7 +203,7 @@ public class TestSnapshotFromClient {
}
/**
- * Test snapshotting a table that is offline n
+ * Test snapshotting a table that is offline
*/
@Test
public void testOfflineTableSnapshot() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java
index 767067f2d7f..0def9018e78 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java
@@ -234,7 +234,7 @@ public class TestSnapshotMetadata {
/**
* Verify that when the table is empty, making metadata changes after the restore does not affect
- * the restored table's original metadata n
+ * the restored table's original metadata
*/
@Test
public void testDescribeOnEmptyTableMatchesAfterMetadataChangeAndRestore() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java
index 6c73c761858..3fd2097a3f3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java
@@ -95,7 +95,7 @@ public class TestTimestampsFilter {
/**
* Test from client side for TimestampsFilter. The TimestampsFilter provides the ability to
* request cells (KeyValues) whose timestamp/version is in the specified list of
- * timestamps/version. n
+ * timestamps/version.
*/
@Test
public void testTimestampsFilter() throws Exception {
@@ -216,7 +216,7 @@ public class TestTimestampsFilter {
}
/**
- * Test TimestampsFilter in the presence of version deletes. n
+ * Test TimestampsFilter in the presence of version deletes.
*/
@Test
public void testWithVersionDeletes() throws Exception {
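Client code that exercises this filter looks roughly like the following sketch; the row, the timestamps, and the Table are illustrative assumptions:

  List<Long> versions = Arrays.asList(5L, 10L, 25L);   // only these timestamps are wanted
  Get get = new Get(row).readAllVersions();
  get.setFilter(new TimestampsFilter(versions));
  Result result = table.get(get);                      // returns cells whose timestamp is in the list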
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java
index 5b0ae464a61..614f68dcd60 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java
@@ -115,7 +115,7 @@ public class TestEntityLocks {
}
/**
- * Test basic lock function - requestLock, await, unlock. n
+ * Test basic lock function - requestLock, await, unlock.
*/
@Test
public void testEntityLock() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreMasterCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreMasterCoprocessor.java
index 4fcd3c0762b..c1ec75f0bed 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreMasterCoprocessor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreMasterCoprocessor.java
@@ -87,7 +87,7 @@ public class TestCoreMasterCoprocessor {
/**
* Assert that when a Coprocessor is annotated with CoreCoprocessor, then it is possible to access
- * a MasterServices instance. Assert the opposite too. Do it to MasterCoprocessors. n
+ * a MasterServices instance. Assert the opposite too. Do it to MasterCoprocessors.
*/
@Test
public void testCoreRegionCoprocessor() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java
index 14477b84f86..6b52ce497a2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java
@@ -101,7 +101,7 @@ public class TestCoreRegionCoprocessor {
/**
* Assert that when a Coprocessor is annotated with CoreCoprocessor, then it is possible to access
- * a RegionServerServices instance. Assert the opposite too. Do it to RegionCoprocessors. n
+ * a RegionServerServices instance. Assert the opposite too. Do it to RegionCoprocessors.
*/
@Test
public void testCoreRegionCoprocessor() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionServerCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionServerCoprocessor.java
index 9d593e80cfe..ca77a57ec47 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionServerCoprocessor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionServerCoprocessor.java
@@ -87,7 +87,7 @@ public class TestCoreRegionServerCoprocessor {
/**
* Assert that when a Coprocessor is annotated with CoreCoprocessor, then it is possible to access
- * a RegionServerServices instance. Assert the opposite too. Do it to RegionServerCoprocessors. n
+ * a RegionServerServices instance. Assert the opposite too. Do it to RegionServerCoprocessors.
*/
@Test
public void testCoreRegionCoprocessor() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java
index fe784bb2a4c..a05643469f4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java
@@ -196,7 +196,7 @@ public class TestOpenTableInCoprocessor {
/**
* Count the number of keyvalue in the table. Scans all possible versions
* @param table table to scan
- * @return number of keyvalues over all rows in the table n
+ * @return number of keyvalues over all rows in the table
*/
private int getKeyValueCount(Table table) throws IOException {
Scan scan = new Scan();
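The counting helper described above amounts to something like this sketch (a Table named table is assumed):

  Scan scan = new Scan().readAllVersions();   // include every version of every cell
  int count = 0;
  try (ResultScanner scanner = table.getScanner(scan)) {
    for (Result r : scanner) {
      count += r.size();                      // number of cells in this row's Result
    }
  }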
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverBypass.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverBypass.java
index 07dd8a81c26..3a733a66e28 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverBypass.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverBypass.java
@@ -98,7 +98,7 @@ public class TestRegionObserverBypass {
}
/**
- * do a single put that is bypassed by a RegionObserver n
+ * do a single put that is bypassed by a RegionObserver
*/
@Test
public void testSimple() throws Exception {
@@ -112,7 +112,7 @@ public class TestRegionObserverBypass {
}
/**
- * Test various multiput operations. If the column family is 'test', then bypass is invoked. n
+ * Test various multiput operations. If the column family is 'test', then bypass is invoked.
*/
@Test
public void testMulti() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
index f44ff7ead4b..36e3ef1e0ff 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
@@ -632,7 +632,7 @@ public class TestRegionObserverInterface {
}
/**
- * Tests overriding compaction handling via coprocessor hooks n
+ * Tests overriding compaction handling via coprocessor hooks
*/
@Test
public void testCompactionOverride() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPaginationFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPaginationFilter.java
index 2c1baa8b960..90d8f187c98 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPaginationFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPaginationFilter.java
@@ -75,7 +75,7 @@ public class TestColumnPaginationFilter {
/**
* The more specific functionality tests are contained within the TestFilters class. This class is
- * mainly for testing serialization nn
+ * mainly for testing serialization
*/
private void basicFilterTests(ColumnPaginationFilter filter) throws Exception {
KeyValue c = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_1);
@@ -83,7 +83,7 @@ public class TestColumnPaginationFilter {
}
/**
- * Tests serialization n
+ * Tests serialization
*/
@Test
public void testSerialization() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java
index 18234275750..72eccff47cb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java
@@ -138,7 +138,7 @@ public class TestDependentColumnFilter {
/**
* This shouldn't be confused with TestFilter#verifyScan as expectedKeys is not the per row total,
- * but the scan total nnnn
+ * but the scan total
*/
private void verifyScan(Scan s, long expectedRows, long expectedCells) throws IOException {
InternalScanner scanner = this.region.getScanner(s);
@@ -213,7 +213,7 @@ public class TestDependentColumnFilter {
}
/**
- * Test that the filter correctly drops rows without a corresponding timestamp n
+ * Test that the filter correctly drops rows without a corresponding timestamp
*/
@Test
public void testFilterDropping() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java
index 96c5a1087d8..e0b382a5ea9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java
@@ -545,7 +545,7 @@ public class TestFilter {
/**
* Tests the the {@link WhileMatchFilter} works in combination with a {@link Filter} that uses the
- * {@link Filter#filterRow()} method. See HBASE-2258. n
+ * {@link Filter#filterRow()} method. See HBASE-2258.
*/
@Test
public void testWhileMatchFilterWithFilterRow() throws Exception {
@@ -601,7 +601,7 @@ public class TestFilter {
/**
* The following test is to ensure old(such as hbase0.94) filterRow() can be correctly fired in
- * 0.96+ code base. See HBASE-10366 n
+ * 0.96+ code base. See HBASE-10366
*/
@Test
public void test94FilterRowCompatibility() throws Exception {
@@ -617,7 +617,7 @@ public class TestFilter {
/**
* Tests the the {@link WhileMatchFilter} works in combination with a {@link Filter} that uses the
- * {@link Filter#filterRowKey(Cell)} method. See HBASE-2258. n
+ * {@link Filter#filterRowKey(Cell)} method. See HBASE-2258.
*/
@Test
public void testWhileMatchFilterWithFilterRowKey() throws Exception {
@@ -642,7 +642,7 @@ public class TestFilter {
/**
* Tests the the {@link WhileMatchFilter} works in combination with a {@link Filter} that uses the
- * {@link Filter#filterCell(Cell)} method. See HBASE-2258. n
+ * {@link Filter#filterCell(Cell)} method. See HBASE-2258.
*/
@Test
public void testWhileMatchFilterWithFilterCell() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java
index 7a867d41a78..6c278b362c8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java
@@ -120,7 +120,7 @@ public class TestFilterList {
}
/**
- * Test "must pass one" n
+ * Test "must pass one"
*/
@Test
public void testMPONE() throws Exception {
@@ -177,7 +177,7 @@ public class TestFilterList {
}
/**
- * Test "must pass all" n
+ * Test "must pass all"
*/
@Test
public void testMPALL() throws Exception {
@@ -218,7 +218,7 @@ public class TestFilterList {
}
/**
- * Test list ordering n
+ * Test list ordering
*/
@Test
public void testOrdering() throws Exception {
@@ -275,7 +275,7 @@ public class TestFilterList {
/**
* When we do a "MUST_PASS_ONE" (a logical 'OR') of the above two filters we expect to get the
- * same result as the 'prefix' only result. n
+ * same result as the 'prefix' only result.
*/
@Test
public void testFilterListTwoFiltersMustPassOne() throws Exception {
@@ -307,7 +307,7 @@ public class TestFilterList {
/**
* When we do a "MUST_PASS_ONE" (a logical 'OR') of the two filters we expect to get the same
- * result as the inclusive stop result. n
+ * result as the inclusive stop result.
*/
@Test
public void testFilterListWithInclusiveStopFilterMustPassOne() throws Exception {
@@ -344,7 +344,7 @@ public class TestFilterList {
}
/**
- * Test serialization n
+ * Test serialization
*/
@Test
public void testSerialization() throws Exception {
@@ -366,7 +366,7 @@ public class TestFilterList {
}
/**
- * Test filterCell logic. n
+ * Test filterCell logic.
*/
@Test
public void testFilterCell() throws Exception {
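The MUST_PASS_ONE / MUST_PASS_ALL semantics these tests cover can be sketched as follows; the concrete filters and keys are illustrative:

  Filter prefix = new PrefixFilter(Bytes.toBytes("ab"));
  Filter stop = new InclusiveStopFilter(Bytes.toBytes("az"));
  FilterList orList = new FilterList(FilterList.Operator.MUST_PASS_ONE, prefix, stop);  // logical OR
  Scan scan = new Scan().setFilter(orList);
  // With MUST_PASS_ALL the list would instead require every wrapped filter to accept a cell.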
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOrOperatorWithBlkCnt.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOrOperatorWithBlkCnt.java
index e287874be09..caa9998e3bb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOrOperatorWithBlkCnt.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOrOperatorWithBlkCnt.java
@@ -70,8 +70,7 @@ public class TestFilterListOrOperatorWithBlkCnt {
public TestName name = new TestName();
/**
- * n
- */
+ * */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
long blkSize = 4096;
@@ -85,8 +84,7 @@ public class TestFilterListOrOperatorWithBlkCnt {
}
/**
- * n
- */
+ * */
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInclusiveStopFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInclusiveStopFilter.java
index b76ce49ed8c..06fed4dc7ff 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInclusiveStopFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInclusiveStopFilter.java
@@ -52,7 +52,7 @@ public class TestInclusiveStopFilter {
}
/**
- * Tests identification of the stop row n
+ * Tests identification of the stop row
*/
@Test
public void testStopRowIdentification() throws Exception {
@@ -60,7 +60,7 @@ public class TestInclusiveStopFilter {
}
/**
- * Tests serialization n
+ * Tests serialization
*/
@Test
public void testSerialization() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultiRowRangeFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultiRowRangeFilter.java
index 65b58e7a19c..a2388736d4a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultiRowRangeFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultiRowRangeFilter.java
@@ -67,16 +67,14 @@ public class TestMultiRowRangeFilter {
public TestName name = new TestName();
/**
- * n
- */
+ * */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.startMiniCluster();
}
/**
- * n
- */
+ * */
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestPageFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestPageFilter.java
index 46ef9edb931..80591422a01 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestPageFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestPageFilter.java
@@ -42,7 +42,7 @@ public class TestPageFilter {
static final int ROW_LIMIT = 3;
/**
- * test page size filter n
+ * test page size filter
*/
@Test
public void testPageSize() throws Exception {
@@ -51,7 +51,7 @@ public class TestPageFilter {
}
/**
- * Test filter serialization n
+ * Test filter serialization
*/
@Test
public void testSerialization() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestRandomRowFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestRandomRowFilter.java
index cbced421453..8454ab357ae 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestRandomRowFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestRandomRowFilter.java
@@ -44,7 +44,7 @@ public class TestRandomRowFilter {
}
/**
- * Tests basics n
+ * Tests basics
*/
@Test
public void testBasics() throws Exception {
@@ -63,7 +63,7 @@ public class TestRandomRowFilter {
}
/**
- * Tests serialization n
+ * Tests serialization
*/
@Test
public void testSerialization() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java
index fc1caf6052d..38f7ed46538 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java
@@ -55,7 +55,7 @@ public class TestSingleColumnValueExcludeFilter {
private static final byte[] VAL_2 = Bytes.toBytes("ab");
/**
- * Test the overridden functionality of filterCell(Cell) n
+ * Test the overridden functionality of filterCell(Cell)
*/
@Test
public void testFilterCell() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueFilter.java
index dc2dbd931a1..fa43f10c1fe 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueFilter.java
@@ -242,7 +242,7 @@ public class TestSingleColumnValueFilter {
}
/**
- * Tests identification of the stop row n
+ * Tests identification of the stop row
*/
@Test
public void testStop() throws Exception {
@@ -254,7 +254,7 @@ public class TestSingleColumnValueFilter {
}
/**
- * Tests serialization n
+ * Tests serialization
*/
@Test
public void testSerialization() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
index a4623f9f7d8..cdeb3d9de83 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
@@ -79,7 +79,7 @@ public class TestHalfStoreFileReader {
* may or may not be a 'next' in the scanner/file. A bug in the half file scanner was returning -1
* at the end of the bottom half, and that was causing the infrastructure above to go null causing
* NPEs and other problems. This test reproduces that failure, and also tests both the bottom and
- * top of the file while we are at it. n
+ * top of the file while we are at it.
*/
@Test
public void testHalfScanAndReseek() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
index 9457076cdf9..e7842202d17 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
@@ -290,7 +290,7 @@ public class TestHeapSize {
/**
* Testing the classes that implements HeapSize and are a part of 0.20. Some are not tested here
- * for example BlockIndex which is tested in TestHFile since it is a non public class n
+ * for example BlockIndex which is tested in TestHFile since it is a non public class
*/
@Test
public void testSizes() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
index f083c4132b9..eeeb078988f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
@@ -117,7 +117,7 @@ public class TestDataBlockEncoders {
}
/**
- * Test data block encoding of empty KeyValue. n * On test failure.
+ * Test data block encoding of empty KeyValue. Throws IOException on test failure.
*/
@Test
public void testEmptyKeyValues() throws IOException {
@@ -141,7 +141,7 @@ public class TestDataBlockEncoders {
}
/**
- * Test KeyValues with negative timestamp. n * On test failure.
+ * Test KeyValues with negative timestamp. Throws IOException on test failure.
*/
@Test
public void testNegativeTimestamps() throws IOException {
@@ -292,7 +292,7 @@ public class TestDataBlockEncoders {
}
/**
- * Test whether the decompression of first key is implemented correctly. n
+ * Test whether the decompression of first key is implemented correctly.
*/
@Test
public void testFirstKeyInBlockOnSample() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/NanoTimer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/NanoTimer.java
index 41886bb1575..396b5076db2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/NanoTimer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/NanoTimer.java
@@ -30,7 +30,7 @@ public class NanoTimer {
private long cumulate = 0;
/**
- * Constructor n * Start the timer upon construction.
+ * Constructor. If start is true, the timer starts upon construction.
*/
public NanoTimer(boolean start) {
if (start) this.start();
@@ -99,7 +99,7 @@ public class NanoTimer {
/**
* A utility method to format a time duration in nano seconds into a human understandable stirng.
- * n * Time duration in nano seconds.
+ * @param t Time duration in nano seconds.
* @return String representation.
*/
public static String nanoTimeToString(long t) {
@@ -163,7 +163,7 @@ public class NanoTimer {
}
/**
- * Simple tester. n
+ * Simple tester.
*/
public static void main(String[] args) {
long i = 7;
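Typical use of the timer, going by the constructor and nanoTimeToString described above, would look roughly like this; stop() and read() are assumed from the rest of the class:

  NanoTimer timer = new NanoTimer(true);   // starts on construction
  // ... code under measurement ...
  timer.stop();
  System.out.println("elapsed: " + NanoTimer.nanoTimeToString(timer.read()));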
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomKeyValueUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomKeyValueUtil.java
index 5945b36343e..59cb9d2100a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomKeyValueUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomKeyValueUtil.java
@@ -72,7 +72,8 @@ public class RandomKeyValueUtil {
* Generates a random key that is guaranteed to increase as the given index i increases. The
* result consists of a prefix, which is a deterministic increasing function of i, and a random
* suffix.
- * @param rand random number generator to use n * @return the random key
+ * @param rand random number generator to use
+ * @return the random key
*/
public static byte[] randomOrderedKey(Random rand, int i) {
StringBuilder k = new StringBuilder();
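The ordering guarantee documented above can be checked with a small loop like this sketch (seed and count are illustrative):

  Random rand = new Random(12345);
  byte[] prev = RandomKeyValueUtil.randomOrderedKey(rand, 0);
  for (int i = 1; i < 100; i++) {
    byte[] next = RandomKeyValueUtil.randomOrderedKey(rand, i);
    assert Bytes.compareTo(prev, next) < 0;   // keys increase with the index
    prev = next;
  }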
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
index ce2f0f204a1..3b45f89b8fc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
@@ -308,7 +308,7 @@ public class TestHFile {
}
/**
- * Test empty HFile. Test all features work reasonably when hfile is empty of entries. n
+ * Test empty HFile. Test all features work reasonably when hfile is empty of entries.
*/
@Test
public void testEmptyHFile() throws IOException {
@@ -489,7 +489,7 @@ public class TestHFile {
}
/**
- * test none codecs n
+ * test none codecs
*/
void basicWithSomeCodec(String codec, boolean useTags) throws IOException {
if (useTags) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
index 83e5fd147e0..5b8cfadfde7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
@@ -498,7 +498,7 @@ public class TestHFileBlockIndex {
}
/**
- * to check if looks good when midKey on a leaf index block boundary n
+ * Check that midKey works correctly when it falls on a leaf index block boundary.
*/
@Test
public void testMidKeyOnLeafIndexBlockBoundary() throws IOException {
@@ -555,7 +555,6 @@ public class TestHFileBlockIndex {
/**
* Testing block index through the HFile writer/reader APIs. Allows to test setting index block
* size through configuration, intermediate-level index blocks, and caching index blocks on write.
- * n
*/
@Test
public void testHFileWriterAndReader() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
index b9f00899975..7134b19ccec 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
@@ -136,7 +136,7 @@ public class TestHFileDataBlockEncoder {
}
/**
- * Test encoding. n
+ * Test encoding.
*/
@Test
public void testEncoding() throws IOException {
@@ -146,7 +146,7 @@ public class TestHFileDataBlockEncoder {
/**
* Test encoding with offheap keyvalue. This test just verifies if the encoders work with DBB and
- * does not use the getXXXArray() API n
+ * does not use the getXXXArray() API
*/
@Test
public void testEncodingWithOffheapKeyValue() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCacheRefCnt.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCacheRefCnt.java
index a3f291b7949..e71817e6a3f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCacheRefCnt.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCacheRefCnt.java
@@ -317,8 +317,6 @@ public class TestBucketCacheRefCnt {
* by Thread2 and the content of Block1 would be overwritten after it is freed, which may
* cause a serious error.
*
- *
- * n
*/
@Test
public void testReplacingBlockAndGettingBlockConcurrently() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java
index 5896f9ea696..4b729f33411 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java
@@ -104,7 +104,7 @@ public class TestBucketWriterThread {
}
/**
- * Test non-error case just works. nnn
+ * Test non-error case just works.
*/
@Test
public void testNonErrorCase() throws IOException, InterruptedException {
@@ -114,7 +114,7 @@ public class TestBucketWriterThread {
/**
* Pass through a too big entry and ensure it is cleared from queues and ramCache. Manually run
- * the WriterThread. n
+ * the WriterThread.
*/
@Test
public void testTooBigEntry() throws InterruptedException {
@@ -126,7 +126,7 @@ public class TestBucketWriterThread {
/**
* Do IOE. Take the RAMQueueEntry that was on the queue, doctor it to throw exception, then put it
- * back and process it. nn
+ * back and process it.
*/
@SuppressWarnings("unchecked")
@Test
@@ -143,7 +143,7 @@ public class TestBucketWriterThread {
}
/**
- * Do Cache full exception nn
+ * Do Cache full exception
*/
@Test
public void testCacheFullException() throws IOException, InterruptedException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index 61de51f6a44..9974c824f88 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -194,8 +194,7 @@ class MockRegionServer implements AdminProtos.AdminService.BlockingInterface,
private final Map scannersAndOffsets = new HashMap<>();
/**
- * @param sn Name of this mock regionserver n * @throws
- * org.apache.hadoop.hbase.ZooKeeperConnectionException
+ * @param sn Name of this mock regionserver
*/
MockRegionServer(final Configuration conf, final ServerName sn)
throws ZooKeeperConnectionException, IOException {
@@ -223,7 +222,7 @@ class MockRegionServer implements AdminProtos.AdminService.BlockingInterface,
}
/**
- * Use this method to set what a scanner will reply as we next through nn
+ * Use this method to set what a scanner will reply as we call next() through it.
*/
void setNextResults(final byte[] regionName, final Result[] rs) {
this.nexts.put(regionName, rs);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
index 60fb712daff..349a8d8c312 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
@@ -117,7 +117,7 @@ public class TestActiveMasterManager {
/**
* Unit tests that uses ZooKeeper but does not use the master-side methods but rather acts
- * directly on ZK. n
+ * directly on ZK.
*/
@Test
public void testActiveMasterManagerFromZK() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailoverBalancerPersistence.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailoverBalancerPersistence.java
index 50f9d3165d2..e80c2c73b55 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailoverBalancerPersistence.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailoverBalancerPersistence.java
@@ -44,7 +44,7 @@ public class TestMasterFailoverBalancerPersistence {
/**
* Test that if the master fails, the load balancer maintains its state (running or not) when the
- * next master takes over n
+ * next master takes over
*/
@Test
public void testMasterFailoverBalancerPersistence() throws Exception {
@@ -82,8 +82,8 @@ public class TestMasterFailoverBalancerPersistence {
}
/**
- * Kill the master and wait for a new active master to show up n * @return the new active master n
- * * @throws java.io.IOException
+ * Kill the master and wait for a new active master to show up
+ * @return the new active master
*/
private HMaster killActiveAndWaitForNewActive(SingleProcessHBaseCluster cluster)
throws InterruptedException, IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java
index 70329633a30..e59ef491912 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java
@@ -63,7 +63,7 @@ public class TestMasterTransitions {
new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") };
/**
- * Start up a mini cluster and put a small table of many empty regions into it. n
+ * Start up a mini cluster and put a small table of many empty regions into it.
*/
@BeforeClass
public static void beforeAllTests() throws Exception {
@@ -124,7 +124,7 @@ public class TestMasterTransitions {
* RegionServerOperation op) { if (isWantedCloseOperation(op) != null) return; this.done = true; }
*/
/*
- * n * @return Null if not the wanted ProcessRegionClose, else op cast as a
+ * @return Null if not the wanted ProcessRegionClose, else op cast as a
* ProcessRegionClose.
*/
/*
@@ -285,7 +285,7 @@ public class TestMasterTransitions {
* entry in its startkey because of addRowToEachRegion. byte [] row = getStartKey(hri); HTable t =
* new HTable(TEST_UTIL.getConfiguration(), TABLENAME); Get g = new Get(row);
* assertTrue((t.get(g)).size() > 0); } /*
- * @return Count of regions in meta table. n
+ * @return Count of regions in meta table.
*/
/*
* private static int countOfMetaRegions() throws IOException { HTable meta = new
@@ -297,7 +297,7 @@ public class TestMasterTransitions {
*/
/*
* Add to each of the regions in hbase:meta a value. Key is the startrow of the region (except its
- * 'aaa' for first region). Actual value is the row name. nnn
+ * 'aaa' for first region). Actual value is the row name.
*/
private static int addToEachStartKey(final int expected) throws IOException {
Table t = TEST_UTIL.getConnection().getTable(TABLENAME);
@@ -332,7 +332,7 @@ public class TestMasterTransitions {
}
/*
- * n * @return Start key for hri (If start key is '', then return 'aaa'.
+ * @return Start key for hri (if start key is '', then return 'aaa').
*/
private static byte[] getStartKey(final RegionInfo hri) {
return Bytes.equals(HConstants.EMPTY_START_ROW, hri.getStartKey())
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java
index b606dff6613..cccc05072cc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java
@@ -344,7 +344,7 @@ public class TestRegionPlacement {
/**
* To verify the region assignment status. It will check the assignment plan consistency between
* hbase:meta and region servers. Also it will verify weather the number of region movement and
- * the number regions on the primary region server are expected nnnnn
+ * the number of regions on the primary region server are as expected
*/
private void verifyRegionAssignment(FavoredNodesPlan plan, int regionMovementNum,
int numRegionsOnPrimaryRS) throws InterruptedException, IOException {
@@ -405,7 +405,7 @@ public class TestRegionPlacement {
/**
* Verify the number of user regions is assigned to the primary region server based on the plan is
* expected
- * @param expectedNum the expected number of assigned regions n
+ * @param expectedNum the expected number of assigned regions
*/
private void verifyRegionOnPrimaryRS(int expectedNum) throws IOException {
lastRegionOnPrimaryRSCount = getNumRegionisOnPrimaryRS();
@@ -415,7 +415,7 @@ public class TestRegionPlacement {
}
/**
- * Verify all the online region servers has been updated to the latest assignment plan nn
+ * Verify all the online region servers have been updated to the latest assignment plan
*/
private void verifyRegionServerUpdated(FavoredNodesPlan plan) throws IOException {
// Verify all region servers contain the correct favored nodes information
@@ -463,7 +463,7 @@ public class TestRegionPlacement {
* Check whether regions are assigned to servers consistent with the explicit hints that are
* persisted in the hbase:meta table. Also keep track of the number of the regions are assigned to
* the primary region server.
- * @return the number of regions are assigned to the primary region server n
+ * @return the number of regions assigned to the primary region server
*/
private int getNumRegionisOnPrimaryRS() throws IOException {
final AtomicInteger regionOnPrimaryNum = new AtomicInteger(0);
@@ -530,7 +530,7 @@ public class TestRegionPlacement {
/**
* Create a table with specified table name and region number.
* @param tableName the name of the table to be created
- * @param regionNum number of regions to create n
+ * @param regionNum number of regions to create
*/
private static void createTable(TableName tableName, int regionNum) throws IOException {
int expectedRegions = regionNum;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticBalancerJmxMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticBalancerJmxMetrics.java
index a96e320a6fe..7cea8fdf47f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticBalancerJmxMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticBalancerJmxMetrics.java
@@ -213,7 +213,7 @@ public class TestStochasticBalancerJmxMetrics extends BalancerTestBase {
}
/**
- * Read the attributes from Hadoop->HBase->Master->Balancer in JMX n
+ * Read the attributes from Hadoop->HBase->Master->Balancer in JMX
*/
private Set readJmxMetrics() throws IOException {
JMXConnector connector = null;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java
index 6ead527ba1f..3898a83247d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java
@@ -354,7 +354,7 @@ public class TestCatalogJanitor {
}
/**
- * Test that we correctly archive all the storefiles when a region is deleted n
+ * Test that we correctly archive all the storefiles when a region is deleted
*/
@Test
public void testSplitParentFirstComparator() {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java
index 997ceef9bff..ce7b57987e1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java
@@ -174,8 +174,8 @@ public class TestCatalogJanitorInMemoryStates {
/*
* Wait on region split. May return because we waited long enough on the split and it didn't
- * happen. Caller should check. n * @return Daughter regions; caller needs to check table actually
- * split.
+ * happen. Caller should check.
+ * @return Daughter regions; caller needs to check table actually split.
*/
private PairOfSameType waitOnDaughters(final RegionInfo r) throws IOException {
long start = EnvironmentEdgeManager.currentTime();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
index 5aafe7743cb..8b7bd8d92e7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
@@ -487,7 +487,7 @@ public class MasterProcedureTestingUtility {
public interface StepHook {
/**
* @param step Step no. at which this will be executed
- * @return false if test should fail otherwise true n
+ * @return false if test should fail otherwise true
*/
boolean execute(int step) throws IOException;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
index 070b873e1ec..6f95aa43657 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
@@ -67,7 +67,7 @@ public class TestTableDescriptorModificationFromClient {
private static final byte[] FAMILY_1 = Bytes.toBytes("cf1");
/**
- * Start up a mini cluster and put a small table of empty regions into it. n
+ * Start up a mini cluster and put a small table of empty regions into it.
*/
@BeforeClass
public static void beforeAllTests() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java
index 9b51c22db19..08e7a4bbcf7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java
@@ -64,7 +64,7 @@ public class MobTestUtil {
}
/*
- * Writes HStoreKey and ImmutableBytes data to passed writer and then closes it. n * n
+ * Writes HStoreKey and ImmutableBytes data to passed writer and then closes it.
*/
private static void writeStoreFile(final StoreFileWriter writer, byte[] fam, byte[] qualifier)
throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java
index 6fdea13498f..49ca13566d2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java
@@ -141,7 +141,7 @@ public class SimpleRSProcedureManager extends RegionServerProcedureManager {
/**
* Wait for all of the currently outstanding tasks submitted via {@link #submitTask(Callable)}
- * @return true on success, false otherwise n
+ * @return true on success, false otherwise
*/
public boolean waitForOutstandingTasks() throws ForeignException {
LOG.debug("Waiting for procedure to finish.");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java
index 8339796c8ec..dcc58033045 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java
@@ -319,8 +319,8 @@ public class TestZKProcedureControllers {
}
/**
- * n * @return a mock {@link ProcedureCoordinator} that just counts down the prepared and
- * committed latch for called to the respective method
+ * @return a mock {@link ProcedureCoordinator} that just counts down the prepared and committed
+ *         latches when the respective method is called
*/
private ProcedureCoordinator setupMockCoordinator(String operationName,
final CountDownLatch prepared, final CountDownLatch committed,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java
index c476ff95f48..5dd0ce8dafb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java
@@ -42,7 +42,7 @@ public class TestReplicationProtobuf {
HBaseClassTestRule.forClass(TestReplicationProtobuf.class);
/**
- * Little test to check we can basically convert list of a list of KVs into a CellScanner n
+ * Little test to check we can basically convert list of a list of KVs into a CellScanner
*/
@Test
public void testGetCellScanner() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java
index 3bb18d8f5ce..4754c5ba530 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java
@@ -81,7 +81,7 @@ public class CreateRandomStoreFile {
/**
* Runs the tools.
* @param args command-line arguments
- * @return true in case of success n
+ * @return true in case of success
*/
public boolean run(String[] args) throws IOException {
options.addOption(OUTPUT_DIR_OPTION, "output_dir", true, "Output directory");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
index 24ceb64973f..ec9de92e9f2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
@@ -388,7 +388,7 @@ public class DataBlockEncodingTool {
* @param name Name of algorithm.
* @param buffer Buffer to be compressed.
* @param offset Position of the beginning of the data.
- * @param length Length of data in buffer. n
+ * @param length Length of data in buffer.
*/
public void benchmarkAlgorithm(Compression.Algorithm algorithm, String name, byte[] buffer,
int offset, int length) throws IOException {
@@ -521,7 +521,7 @@ public class DataBlockEncodingTool {
}
/**
- * Display statistics of different compression algorithms. n
+ * Display statistics of different compression algorithms.
*/
public void displayStatistics() throws IOException {
final String comprAlgo = compressionAlgorithmName.toUpperCase(Locale.ROOT);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
index 01ffcc1ac79..0f8fcea47a7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
@@ -156,7 +156,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
}
/**
- * A simple test which verifies the 3 possible states when scanning across snapshot. nn
+ * A simple test which verifies the 3 possible states when scanning across snapshot.
*/
@Override
@Test
@@ -198,7 +198,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
}
/**
- * Test memstore snapshots n
+ * Test memstore snapshots
*/
@Override
@Test
@@ -217,7 +217,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
//////////////////////////////////////////////////////////////////////////////
/**
- * Test getNextRow from memstore n
+ * Test getNextRow from memstore
*/
@Override
@Test
@@ -304,7 +304,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
/**
* Add keyvalues with a fixed memstoreTs, and checks that memstore size is decreased as older
- * keyvalues are deleted from the memstore. n
+ * keyvalues are deleted from the memstore.
*/
@Override
@Test
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
index 6625da612d5..c4d5d35f7d8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
@@ -782,7 +782,7 @@ public class TestCompaction {
* We set compaction.kv.max to 10 so compaction will scan 10 versions each round, meanwhile we set
* keepSeqIdPeriod=0 in {@link DummyCompactor} so all 10 versions of hfile2 will be written out
* with seqId cleaned (set to 0) including cell-B, then when scanner goes to cell-A it will cause
- * a scan out-of-order assertion error before HBASE-16931 n * if error occurs during the test
+ * a scan out-of-order assertion error before HBASE-16931.
*/
@Test
public void testCompactionSeqId() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java
index c9f30d17410..aa94cc54579 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java
@@ -147,8 +147,9 @@ public class TestCompactionState {
/**
* Load data to a table, flush it to disk, trigger compaction, confirm the compaction state is
- * right and wait till it is done. nnn * @param singleFamily otherwise, run compaction on all cfs
- * @param stateSource get the state by Admin or Master nn
+ * right and wait till it is done.
+ * @param singleFamily if true, run compaction on a single column family; otherwise on all cfs
+ * @param stateSource get the state by Admin or Master
*/
private void compaction(final String tableName, final int flushes,
final CompactionState expectedState, boolean singleFamily, StateSource stateSource)
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicy.java
index f11a5fa66b3..475454d6763 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicy.java
@@ -242,7 +242,7 @@ public class TestDateTieredCompactionPolicy extends AbstractTestDateTieredCompac
/**
* Major Compaction to check min max timestamp falling in the same window and also to check
- * boundary condition in which case binary sort gives insertion point as length of the array n
+ * boundary condition in which case binary sort gives insertion point as length of the array
*/
@Test
public void checkMinMaxTimestampSameBoundary() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
index ab6d2d03453..adccec5d8e5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
@@ -160,7 +160,7 @@ public class TestDefaultMemStore {
}
/**
- * Test memstore snapshot happening while scanning. n
+ * Test memstore snapshot happening while scanning.
*/
@Test
public void testScanAcrossSnapshot() throws IOException {
@@ -236,7 +236,7 @@ public class TestDefaultMemStore {
}
/**
- * A simple test which verifies the 3 possible states when scanning across snapshot. nn
+ * A simple test which verifies the 3 possible states when scanning across snapshot.
*/
@Test
public void testScanAcrossSnapshot2() throws IOException, CloneNotSupportedException {
@@ -528,7 +528,7 @@ public class TestDefaultMemStore {
}
/**
- * Test memstore snapshots n
+ * Test memstore snapshots
*/
@Test
public void testSnapshotting() throws IOException {
@@ -566,7 +566,7 @@ public class TestDefaultMemStore {
//////////////////////////////////////////////////////////////////////////////
/**
- * Test getNextRow from memstore n
+ * Test getNextRow from memstore
*/
@Test
public void testGetNextRow() throws Exception {
@@ -816,7 +816,7 @@ public class TestDefaultMemStore {
/**
* Add keyvalues with a fixed memstoreTs, and checks that memstore size is decreased as older
- * keyvalues are deleted from the memstore. n
+ * keyvalues are deleted from the memstore.
*/
@Test
public void testUpsertMemstoreSize() throws Exception {
@@ -860,7 +860,7 @@ public class TestDefaultMemStore {
/**
* Tests that the timeOfOldestEdit is updated correctly for the various edit operations in
- * memstore. n
+ * memstore.
*/
@Test
public void testUpdateToTimeOfOldestEdit() throws Exception {
@@ -901,7 +901,7 @@ public class TestDefaultMemStore {
/**
* Tests the HRegion.shouldFlush method - adds an edit in the memstore and checks that shouldFlush
* returns true, and another where it disables the periodic flush functionality and tests whether
- * shouldFlush returns false. n
+ * shouldFlush returns false.
*/
@Test
public void testShouldFlush() throws Exception {
@@ -1004,7 +1004,7 @@ public class TestDefaultMemStore {
/**
* Adds {@link #ROW_COUNT} rows and {@link #QUALIFIER_COUNT}
* @param hmc Instance to add rows to.
- * @return How many rows we added. n
+ * @return How many rows we added.
*/
protected int addRows(final AbstractMemStore hmc) {
return addRows(hmc, HConstants.LATEST_TIMESTAMP);
@@ -1013,7 +1013,7 @@ public class TestDefaultMemStore {
/**
* Adds {@link #ROW_COUNT} rows and {@link #QUALIFIER_COUNT}
* @param hmc Instance to add rows to.
- * @return How many rows we added. n
+ * @return How many rows we added.
*/
protected int addRows(final MemStore hmc, final long ts) {
for (int i = 0; i < ROW_COUNT; i++) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java
index 9c322147fd5..948f1d33a6a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java
@@ -73,7 +73,7 @@ public class TestDeleteMobTable {
}
/**
- * Generate the mob value. n * the size of the value
+ * Generate the mob value with the given size.
* @return the mob value generated
*/
private static byte[] generateMobValue(int size) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
index 898677b5a2a..6735e472229 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
@@ -121,7 +121,7 @@ public class TestHMobStore {
private final String DIR = TEST_UTIL.getDataTestDir("TestHMobStore").toString();
/**
- * Setup n
+ * Setup
*/
@Before
public void setUp() throws Exception {
@@ -205,7 +205,7 @@ public class TestHMobStore {
}
/**
- * Getting data from memstore n
+ * Getting data from memstore
*/
@Test
public void testGetFromMemStore() throws IOException {
@@ -238,7 +238,7 @@ public class TestHMobStore {
}
/**
- * Getting MOB data from files n
+ * Getting MOB data from files
*/
@Test
public void testGetFromFiles() throws IOException {
@@ -280,7 +280,7 @@ public class TestHMobStore {
}
/**
- * Getting the reference data from files n
+ * Getting the reference data from files
*/
@Test
public void testGetReferencesFromFiles() throws IOException {
@@ -324,7 +324,7 @@ public class TestHMobStore {
}
/**
- * Getting data from memstore and files n
+ * Getting data from memstore and files
*/
@Test
public void testGetFromMemStoreAndFiles() throws IOException {
@@ -366,7 +366,7 @@ public class TestHMobStore {
}
/**
- * Getting data from memstore and files n
+ * Getting data from memstore and files
*/
@Test
public void testMobCellSizeThreshold() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 4147f46f4ee..3ac6faf2df8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -270,7 +270,7 @@ public class TestHRegion {
}
/**
- * Test that I can use the max flushed sequence id after the close. n
+ * Test that I can use the max flushed sequence id after the close.
*/
@Test
public void testSequenceId() throws IOException {
@@ -304,7 +304,7 @@ public class TestHRegion {
* starts a flush is ongoing, the first flush is skipped and only the second flush takes place.
* However, two flushes are required in case previous flush fails and leaves some data in
* snapshot. The bug could cause loss of data in current memstore. The fix is removing all
- * conditions except abort check so we ensure 2 flushes for region close." n
+ * conditions except abort check so we ensure 2 flushes for region close."
*/
@Test
public void testCloseCarryingSnapshot() throws IOException {
@@ -476,7 +476,7 @@ public class TestHRegion {
* proceed for a couple cycles, the size in current memstore could be much larger than the
* snapshot. It's likely to drift memstoreSize much smaller than expected. In extreme case, if the
* error accumulates to even bigger than HRegion's memstore size limit, any further flush is
- * skipped because flush does not do anything if memstoreSize is not larger than 0." n
+ * skipped because flush does not do anything if memstoreSize is not larger than 0."
*/
@Test
public void testFlushSizeAccounting() throws Exception {
@@ -3725,7 +3725,7 @@ public class TestHRegion {
}
/**
- * This method tests https://issues.apache.org/jira/browse/HBASE-2516. n
+ * This method tests https://issues.apache.org/jira/browse/HBASE-2516.
*/
@Test
public void testGetScanner_WithRegionClosed() throws IOException {
@@ -4400,7 +4400,7 @@ public class TestHRegion {
/**
* Write an HFile block full with Cells whose qualifier that are identical between 0 and
- * Short.MAX_VALUE. See HBASE-13329. n
+ * Short.MAX_VALUE. See HBASE-13329.
*/
@Test
public void testLongQualifier() throws Exception {
@@ -4420,8 +4420,8 @@ public class TestHRegion {
/**
* Flushes the cache in a thread while scanning. The tests verify that the scan is coherent - e.g.
- * the returned results are always of the same or later update as the previous results. n * scan /
- * compact n * thread join
+ * the returned results are always of the same or later update as the previous results. Flush /
+ * compact runs in a separate thread that is joined when the scan is done.
*/
@Test
public void testFlushCacheWhileScanning() throws IOException, InterruptedException {
@@ -4562,8 +4562,7 @@ public class TestHRegion {
/**
* Writes very wide records and scans for the latest every time.. Flushes and compacts the region
- * every now and then to keep things realistic. n * by flush / scan / compaction n * when joining
- * threads
+ * every now and then to keep things realistic.
*/
@Test
public void testWritesWhileScanning() throws IOException, InterruptedException {
@@ -4735,7 +4734,7 @@ public class TestHRegion {
/**
* Writes very wide records and gets the latest row every time.. Flushes and compacts the region
- * aggressivly to catch issues. n * by flush / scan / compaction n * when joining threads
+ * aggressively to catch issues.
*/
@Test
public void testWritesWhileGetting() throws Exception {
@@ -5101,7 +5100,7 @@ public class TestHRegion {
/**
* Testcase to check state of region initialization task set to ABORTED or not if any exceptions
- * during initialization n
+ * during initialization
*/
@Test
public void testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization() throws Exception {
@@ -5223,7 +5222,7 @@ public class TestHRegion {
}
/**
- * Test case to check increment function with memstore flushing n
+ * Test case to check increment function with memstore flushing
*/
@Test
public void testParallelIncrementWithMemStoreFlush() throws Exception {
@@ -5308,7 +5307,7 @@ public class TestHRegion {
}
/**
- * Test case to check append function with memstore flushing n
+ * Test case to check append function with memstore flushing
*/
@Test
public void testParallelAppendWithMemStoreFlush() throws Exception {
@@ -5366,7 +5365,7 @@ public class TestHRegion {
}
/**
- * Test case to check put function with memstore flushing for same row, same ts n
+ * Test case to check put function with memstore flushing for same row, same ts
*/
@Test
public void testPutWithMemStoreFlush() throws Exception {
@@ -5739,7 +5738,7 @@ public class TestHRegion {
}
/*
- * Assert first value in the passed region is firstValue . n * n * n * n
+ * Assert first value in the passed region is firstValue.
*/
protected void assertScan(final HRegion r, final byte[] fs, final byte[] firstValue)
throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
index a6551d492a3..61fd56e149d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
@@ -1140,7 +1140,7 @@ public class TestHRegionReplayEvents {
/**
* Tests that a region opened in secondary mode would not write region open / close events to its
- * WAL. n
+ * WAL.
*/
@Test
public void testSecondaryRegionDoesNotWriteRegionEventsToWAL() throws IOException {
@@ -1652,7 +1652,7 @@ public class TestHRegionReplayEvents {
/**
* Puts a total of numRows + numRowsAfterFlush records indexed with numeric row keys. Does a flush
* every flushInterval number of records. Then it puts numRowsAfterFlush number of more rows but
- * does not execute flush after n
+ * does not execute flush after
*/
private void putDataWithFlushes(HRegion region, int flushInterval, int numRows,
int numRowsAfterFlush) throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
index 788cec65ee4..2a2e7a2d2fa 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
@@ -177,7 +177,7 @@ public class TestHStoreFile {
byte[] SPLITKEY = new byte[] { (LAST_CHAR + FIRST_CHAR) / 2, FIRST_CHAR };
/*
- * Writes HStoreKey and ImmutableBytes data to passed writer and then closes it. nn
+ * Writes HStoreKey and ImmutableBytes data to passed writer and then closes it.
*/
public static void writeStoreFile(final StoreFileWriter writer, byte[] fam, byte[] qualifier)
throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestJoinedScanners.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestJoinedScanners.java
index 6f14eee1e9b..be961bb396d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestJoinedScanners.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestJoinedScanners.java
@@ -191,7 +191,8 @@ public class TestJoinedScanners {
private static Options options = new Options();
/**
- * Command line interface: n * @throws IOException if there is a bug while reading from disk
+ * Command line interface:
+ * @throws IOException if there is a bug while reading from disk
*/
public static void main(final String[] args) throws Exception {
Option encodingOption =
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
index 8251578fabb..4c96dc221ab 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
@@ -158,7 +158,7 @@ public class TestMajorCompaction {
}
/**
- * Run compaction and flushing memstore Assert deletes get cleaned up. n
+ * Run compaction and flushing memstore. Assert deletes get cleaned up.
*/
@Test
public void testMajorCompaction() throws Exception {
@@ -417,7 +417,7 @@ public class TestMajorCompaction {
/**
* Test that on a major compaction, if all cells are expired or deleted, then we'll end up with no
* product. Make sure scanner over region returns right answer in this case - and that it just
- * basically works. n
+ * basically works.
*/
@Test
public void testMajorCompactingToNoOutputWithReverseScan() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenInitializing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenInitializing.java
index 82f653d35f2..f839e45ad53 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenInitializing.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenInitializing.java
@@ -80,7 +80,7 @@ public class TestRSKilledWhenInitializing {
/**
* Test verifies whether a region server is removed from online servers list in master if it went
- * down after registering with master. Test will TIMEOUT if an error!!!! n
+ * down after registering with master. Test will TIMEOUT if an error occurs!!!!
*/
@Test
public void testRSTerminationAfterRegisteringToMasterBeforeCreatingEphemeralNode()
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java
index 22b06b68bc2..4650276531e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java
@@ -126,7 +126,7 @@ public class TestRegionReplicaFailover {
/**
* Tests the case where if there is some data in the primary region, reopening the region replicas
- * (enable/disable table, etc) makes the region replicas readable. n
+ * (enable/disable table, etc) makes the region replicas readable.
*/
@Test
public void testSecondaryRegionWithNonEmptyRegion() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerOnlineConfigChange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerOnlineConfigChange.java
index 228883e386b..b4bbf42577d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerOnlineConfigChange.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerOnlineConfigChange.java
@@ -127,7 +127,7 @@ public class TestRegionServerOnlineConfigChange {
}
/**
- * Test that the configurations in the CompactionConfiguration class change properly. n
+ * Test that the configurations in the CompactionConfiguration class change properly.
*/
@Test
public void testCompactionConfigurationOnlineChange() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRequestsPerSecondMetric.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRequestsPerSecondMetric.java
index 1343802aa77..8b33912035e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRequestsPerSecondMetric.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRequestsPerSecondMetric.java
@@ -69,7 +69,7 @@ public class TestRequestsPerSecondMetric {
* count. Next, we disable a table and all of its 25 regions will be closed. As part of region
* close, his metric will also be removed from metricCache. prior to HBASE-23237, we do not
* remove/reset his metric so we incorrectly compute (currentRequestCount - lastRequestCount)
- * which result into negative value. nn
+ * which results in a negative value.
*/
public void testNoNegativeSignAtRequestsPerSecond() throws IOException, InterruptedException {
final TableName TABLENAME = TableName.valueOf("t");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java
index 4d3448061f0..418c1808a64 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java
@@ -105,7 +105,7 @@ public class TestRowTooBig {
/**
* Usecase: - create a row with 1M cells, 10 bytes in each - flush & run major compaction - try to
- * Get whole row. OOME happened in StoreScanner.next(..). n
+ * Get whole row. OOME happened in StoreScanner.next(..).
*/
@Test(expected = RowTooBigException.class)
public void testScanAcrossManySmallColumns() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
index ed7d760d276..779fb9a4c90 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
@@ -546,7 +546,7 @@ public class TestScanner {
* @param hri Region
* @param flushIndex At what row we start the flush.
* @param concurrent if the flush should be concurrent or sync.
- * @return Count of rows found. n
+ * @return Count of rows found.
*/
private int count(final Table countTable, final int flushIndex, boolean concurrent)
throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
index 812adcc4e83..0a7f4ee9788 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
@@ -1003,8 +1003,7 @@ public class TestSplitTransactionOnCluster {
/**
* Ensure single table region is not on same server as the single hbase:meta table region.
- * @return Index of the server hosting the single table region nn * @throws
- * org.apache.hadoop.hbase.ZooKeeperConnectionException n
+ * @return Index of the server hosting the single table region
*/
private int ensureTableRegionNotOnSameServerAsMeta(final Admin admin, final RegionInfo hri)
throws IOException, MasterNotRunningException, ZooKeeperConnectionException,
@@ -1046,8 +1045,8 @@ public class TestSplitTransactionOnCluster {
/**
* Find regionserver other than the one passed. Can't rely on indexes into list of regionservers
- * since crashed servers occupy an index. nn * @return A regionserver that is not
- * notThisOne or null if none found
+ * since crashed servers occupy an index.
+ * @return A regionserver that is not notThisOne or null if none found
*/
private HRegionServer getOtherRegionServer(final SingleProcessHBaseCluster cluster,
final HRegionServer notThisOne) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
index c1843e306f1..91717060d99 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
@@ -378,7 +378,7 @@ public class TestStoreScanner {
}
/*
- * Test utility for building a NavigableSet for scanners. nn
+ * Test utility for building a NavigableSet for scanners.
*/
NavigableSet getCols(String... strCols) {
NavigableSet cols = new TreeSet<>(Bytes.BYTES_COMPARATOR);
@@ -606,7 +606,7 @@ public class TestStoreScanner {
/*
* Test test shows exactly how the matcher's return codes confuses the StoreScanner and prevent it
* from doing the right thing. Seeking once, then nexting twice should return R1, then R2, but in
- * this case it doesnt. TODO this comment makes no sense above. Appears to do the right thing. n
+ * this case it doesn't. TODO this comment makes no sense above. Appears to do the right thing.
*/
@Test
public void testWontNextToNext() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java
index 3d3d4c59309..8cabe1fb363 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java
@@ -162,7 +162,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
/**
* Verify that {@link ScanQueryMatcher} only skips expired KeyValue instances and does not exit
* early from the row (skipping later non-expired KeyValues). This version mimics a Get with
- * explicitly specified column qualifiers. n
+ * explicitly specified column qualifiers.
*/
@Test
public void testMatch_ExpiredExplicit() throws IOException {
@@ -205,7 +205,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
/**
* Verify that {@link ScanQueryMatcher} only skips expired KeyValue instances and does not exit
* early from the row (skipping later non-expired KeyValues). This version mimics a Get with
- * wildcard-inferred column qualifiers. n
+ * wildcard-inferred column qualifiers.
*/
@Test
public void testMatch_ExpiredWildcard() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
index 4d4d6bc1047..f146926010c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
@@ -202,7 +202,7 @@ public abstract class AbstractTestFSWAL {
/**
* tests the log comparator. Ensure that we are not mixing meta logs with non-meta logs (throws
- * exception if we do). Comparison is based on the timestamp present in the wal name. n
+ * exception if we do). Comparison is based on the timestamp present in the wal name.
*/
@Test
public void testWALComparator() throws Exception {
@@ -259,7 +259,7 @@ public abstract class AbstractTestFSWAL {
*
* This method tests this behavior by inserting edits and rolling the wal enough times to reach
* the max number of logs threshold. It checks whether we get the "right regions and stores" for
- * flush on rolling the wal. n
+ * flush on rolling the wal.
*/
@Test
public void testFindMemStoresEligibleForFlush() throws Exception {
@@ -403,8 +403,8 @@ public abstract class AbstractTestFSWAL {
* Test flush for sure has a sequence id that is beyond the last edit appended. We do this by
* slowing appends in the background ring buffer thread while in foreground we call flush. The
* addition of the sync over HRegion in flush should fix an issue where flush was returning before
- * all of its appends had made it out to the WAL (HBASE-11109). n * @see
- * HBASE-11109
+ * all of its appends had made it out to the WAL (HBASE-11109).
+ * @see HBASE-11109
*/
@Test
public void testFlushSequenceIdIsGreaterThanAllEditsInHFile() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java
index 6385c1794bf..8f7704fb3f6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java
@@ -85,7 +85,7 @@ public abstract class AbstractTestProtobufLog {
}
/**
- * Reads the WAL with and without WALTrailer. n
+ * Reads the WAL with and without WALTrailer.
*/
@Test
public void testWALTrailer() throws IOException {
@@ -101,7 +101,7 @@ public abstract class AbstractTestProtobufLog {
* so that a trailer is appended to the WAL. Otherwise, it starts reading after
* the sync call. This means that reader is not aware of the trailer. In this
* scenario, if the reader tries to read the trailer in its next() call, it
- * returns false from ProtoBufLogReader. n
+ * returns false from ProtoBufLogReader.
*/
private void doRead(boolean withTrailer) throws IOException {
int columnCount = 5;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
index 554ca5ebaef..6a9fcfde4ed 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
@@ -185,8 +185,7 @@ public abstract class AbstractTestWALReplay {
}
/**
- * n
- */
+ */
@Test
public void testReplayEditsAfterRegionMovedWithMultiCF() throws Exception {
final TableName tableName = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
index 6242177d410..130a5e73ba0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
@@ -423,7 +423,7 @@ public class TestLogRolling extends AbstractTestLogRolling {
}
/**
- * Test that WAL is rolled when all data nodes in the pipeline have been restarted. n
+ * Test that WAL is rolled when all data nodes in the pipeline have been restarted.
*/
@Test
public void testLogRollOnPipelineRestart() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
index bdc343181f0..376bec6bf91 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
@@ -79,7 +79,7 @@ public class TestLogRollingNoCluster {
/**
* Spin up a bunch of threads and have them all append to a WAL. Roll the WAL frequently to try
- * and trigger NPE. nn
+ * and trigger NPE.
*/
@Test
public void testContendedLogRolling() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java
index fdb88304dae..fa7548e3ecc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java
@@ -46,7 +46,7 @@ public class TestReplicationDisableInactivePeer extends TestReplicationBase {
/**
* Test disabling an inactive peer. Add a peer which is inactive, trying to insert, disable the
* peer, then activate the peer and make sure nothing is replicated. In Addition, enable the peer
- * and check the updates are replicated. n
+ * and check the updates are replicated.
*/
@Test
public void testDisableInactivePeer() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java
index d9106b66759..07e2d7ae819 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java
@@ -157,7 +157,7 @@ public class TestReplicationSink {
}
/**
- * Insert a whole batch of entries n
+ * Insert a whole batch of entries
*/
@Test
public void testBatchSink() throws Exception {
@@ -174,7 +174,7 @@ public class TestReplicationSink {
}
/**
- * Insert a mix of puts and deletes n
+ * Insert a mix of puts and deletes
*/
@Test
public void testMixedPutDelete() throws Exception {
@@ -234,7 +234,7 @@ public class TestReplicationSink {
}
/**
- * Insert to 2 different tables n
+ * Insert to 2 different tables
*/
@Test
public void testMixedPutTables() throws Exception {
@@ -258,7 +258,7 @@ public class TestReplicationSink {
}
/**
- * Insert then do different types of deletes n
+ * Insert then do different types of deletes
*/
@Test
public void testMixedDeletes() throws Exception {
@@ -285,7 +285,7 @@ public class TestReplicationSink {
/**
* Puts are buffered, but this tests when a delete (not-buffered) is applied before the actual Put
- * that creates it. n
+ * that creates it.
*/
@Test
public void testApplyDeleteBeforePut() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java
index 9d811618b59..c7ac87f0821 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java
@@ -101,7 +101,7 @@ public class TestWALEntrySinkFilter {
* Test filter. Filter will filter out any write time that is <= 5 (BOUNDARY). We count how many
* items we filter out and we count how many cells make it through for distribution way down below
* in the Table#batch implementation. Puts in place a custom DevNullConnection so we can insert
- * our counting Table. n
+ * our counting Table.
*/
@Test
public void testWALEntryFilter() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/AbstractTestSecureIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/AbstractTestSecureIPC.java
index f2a0a229e55..26405f4446b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/AbstractTestSecureIPC.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/AbstractTestSecureIPC.java
@@ -296,7 +296,7 @@ public class AbstractTestSecureIPC {
}
/**
- * Test various combinations of Server and Client configuration for Crypto AES. n
+ * Test various combinations of Server and Client configuration for Crypto AES.
*/
@Test
public void testDifferentConfWithCryptoAES() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestUsersOperationsWithSecureHadoop.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestUsersOperationsWithSecureHadoop.java
index 0928e0d995b..dc5a5ebcfa4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestUsersOperationsWithSecureHadoop.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestUsersOperationsWithSecureHadoop.java
@@ -88,7 +88,7 @@ public class TestUsersOperationsWithSecureHadoop {
*
* hbase.regionserver.kerberos.principal
*
- * hbase.regionserver.keytab.file n
+ * hbase.regionserver.keytab.file
*/
@Test
public void testUserLoginInSecureHadoop() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index b25ba123262..5d71486e3c0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -92,7 +92,7 @@ public final class SnapshotTestingUtils {
}
/**
- * Assert that we don't have any snapshots lists n * if the admin operation fails
+ * Assert that we don't have any snapshots listed.
*/
public static void assertNoSnapshots(Admin admin) throws IOException {
assertEquals("Have some previous snapshots", 0, admin.listSnapshots().size());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFilesSplitRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFilesSplitRecovery.java
index 1574f80a321..24a28d18751 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFilesSplitRecovery.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFilesSplitRecovery.java
@@ -156,7 +156,7 @@ public class TestBulkLoadHFilesSplitRecovery {
/**
* Creates a table with given table name,specified number of column families
- * and splitkeys if the table does not already exist. nnn
+ * and splitkeys if the table does not already exist.
*/
private void setupTableWithSplitkeys(TableName table, int cfs, byte[][] SPLIT_KEYS)
throws IOException {
@@ -236,7 +236,6 @@ public class TestBulkLoadHFilesSplitRecovery {
/**
* Checks that all columns have the expected value and that there is the expected number of rows.
- * n
*/
void assertExpectedTable(TableName table, int count, int value) throws IOException {
TableDescriptor htd = util.getAdmin().getDescriptor(table);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
index 99efaff2ae6..0b989b8029f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
@@ -213,7 +213,7 @@ public class BaseTestHBaseFsck {
/**
* Setup a clean table before we start mucking with it. It will set tbl which needs to be closed
- * after test nnn
+ * after test
*/
void setupTable(TableName tablename) throws Exception {
setupTableWithRegionReplica(tablename, 1);
@@ -221,7 +221,7 @@ public class BaseTestHBaseFsck {
/**
* Setup a clean table with a certain region_replica count It will set tbl which needs to be
- * closed after test n
+ * closed after test
*/
void setupTableWithRegionReplica(TableName tablename, int replicaCount) throws Exception {
TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(tablename);
@@ -244,7 +244,7 @@ public class BaseTestHBaseFsck {
/**
* Setup a clean table with a mob-enabled column.
- * @param tablename The name of a table to be created. n
+ * @param tablename The name of a table to be created.
*/
void setupMobTable(TableName tablename) throws Exception {
TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(tablename);
@@ -332,7 +332,8 @@ public class BaseTestHBaseFsck {
/**
* We don't have an easy way to verify that a flush completed, so we loop until we find a
- * legitimate hfile and return it. nn * @return Path of a flushed hfile. n
+ * legitimate hfile and return it.
+ * @return Path of a flushed hfile.
*/
Path getFlushedHFile(FileSystem fs, TableName table) throws IOException {
Path tableDir = CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), table);
@@ -357,7 +358,7 @@ public class BaseTestHBaseFsck {
* Gets flushed mob files.
* @param fs The current file system.
* @param table The current table name.
- * @return Path of a flushed hfile. n
+ * @return Path of a flushed hfile.
*/
Path getFlushedMobFile(FileSystem fs, TableName table) throws IOException {
Path famDir = MobUtils.getMobFamilyPath(conf, table, FAM_STR);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
index ae0754c0802..3aa247c0632 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
@@ -85,7 +85,7 @@ public class HFileArchiveTestingUtil {
* Compare the archived files to the files in the original directory
* @param expected original files that should have been archived
* @param actual files that were archived
- * @param fs filessystem on which the archiving took place n
+ * @param fs filesystem on which the archiving took place
*/
public static void assertArchiveEqualToOriginal(FileStatus[] expected, FileStatus[] actual,
FileSystem fs) throws IOException {
@@ -98,7 +98,7 @@ public class HFileArchiveTestingUtil {
* @param actual files that were archived
* @param fs {@link FileSystem} on which the archiving took place
* @param hasTimedBackup true if we expect to find an archive backup directory with a
- * copy of the files in the archive directory (and the original files). n
+ * copy of the files in the archive directory (and the original files).
*/
public static void assertArchiveEqualToOriginal(FileStatus[] expected, FileStatus[] actual,
FileSystem fs, boolean hasTimedBackup) throws IOException {
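As a reading aid for the two assertArchiveEqualToOriginal overloads above, a hedged test fragment; storeDir and archiveDir are hypothetical Paths prepared by the test, and fs is the test FileSystem (imports omitted):

  FileStatus[] original = fs.listStatus(storeDir);     // store files before archiving
  // ... trigger the archiving under test ...
  FileStatus[] archived = fs.listStatus(archiveDir);   // files that landed in the archive
  HFileArchiveTestingUtil.assertArchiveEqualToOriginal(original, archived, fs);
  // when a timestamped backup directory is also expected alongside the archive:
  HFileArchiveTestingUtil.assertArchiveEqualToOriginal(original, archived, fs, true);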
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java
index d8f90160d27..5e8447c2ad8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java
@@ -118,7 +118,7 @@ public class ProcessBasedLocalHBaseCluster {
/**
* Makes this local HBase cluster use a mini-DFS cluster. Must be called before
- * {@link #startHBase()}. n
+ * {@link #startHBase()}.
*/
public void startMiniDFS() throws Exception {
if (testUtil == null) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
index 7a3c71fc4aa..ffd9a0e0a38 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
@@ -498,7 +498,7 @@ public class TestFSUtils {
/**
* Ugly test that ensures we can get at the hedged read counters in dfsclient. Does a bit of
- * preading with hedged reads enabled using code taken from hdfs TestPread. n
+ * preading with hedged reads enabled using code taken from hdfs TestPread.
*/
@Test
public void testDFSHedgedReadMetrics() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java
index efca6564660..41e98464bfd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java
@@ -80,7 +80,7 @@ public class HbckTestingUtil {
/**
* Runs hbck with the -sidelineCorruptHFiles option
- * @param table table constraint n
+ * @param table table constraint
*/
public static HBaseFsck doHFileQuarantine(Configuration conf, TableName table) throws Exception {
String[] args =
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/test/LoadTestDataGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/test/LoadTestDataGenerator.java
index e7283d2d9aa..ec7a5f11e40 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/test/LoadTestDataGenerator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/test/LoadTestDataGenerator.java
@@ -82,7 +82,7 @@ public abstract class LoadTestDataGenerator {
}
/**
- * initialize the LoadTestDataGenerator n * init args
+ * Initialize the LoadTestDataGenerator with the given init args.
*/
public void initialize(String[] args) {
this.args = args;
@@ -140,16 +140,16 @@ public abstract class LoadTestDataGenerator {
public abstract boolean verify(byte[] rowKey, byte[] cf, byte[] column, byte[] value);
/**
- * Giving a chance for the LoadTestDataGenerator to change the Mutation load. nn * @return updated
- * Mutation n
+ * Giving a chance for the LoadTestDataGenerator to change the Mutation load.
+ * @return updated Mutation
*/
public Mutation beforeMutate(long rowkeyBase, Mutation m) throws IOException {
return m;
}
/**
- * Giving a chance for the LoadTestDataGenerator to change the Get load. nn * @return updated Get
- * n
+ * Giving a chance for the LoadTestDataGenerator to change the Get load.
+ * @return updated Get
*/
public Get beforeGet(long rowkeyBase, Get get) throws IOException {
return get;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java
index 4a19ea039b9..850d115d90d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java
@@ -179,7 +179,7 @@ public class IOTestProvider implements WALProvider {
* @param prefix should always be hostname and port in distributed env and it will be
* URL encoded before being used. If prefix is null, "wal" will be used
* @param suffix will be url encoded. null is treated as empty. non-empty must start
- * with {@link AbstractFSWALProvider#WAL_FILE_NAME_DELIMITER} n
+ * with {@link AbstractFSWALProvider#WAL_FILE_NAME_DELIMITER}
*/
public IOTestWAL(final FileSystem fs, final Path rootDir, final String logDir,
final String archiveDir, final Configuration conf, final List<WALActionsListener> listeners,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestFSHLogProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestFSHLogProvider.java
index 994a02e2a24..eb76fb2ae0f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestFSHLogProvider.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestFSHLogProvider.java
@@ -161,7 +161,7 @@ public class TestFSHLogProvider {
}
/**
- * used by TestDefaultWALProviderWithHLogKey n
+ * used by TestDefaultWALProviderWithHLogKey
*/
WALKeyImpl getWalKey(final byte[] info, final TableName tableName, final long timestamp,
NavigableMap<byte[], Integer> scopes) {
@@ -169,7 +169,7 @@ public class TestFSHLogProvider {
}
/**
- * helper method to simulate region flush for a WAL. nn
+ * helper method to simulate region flush for a WAL.
*/
protected void flushRegion(WAL wal, byte[] regionEncodedName, Set<byte[]> flushedFamilyNames) {
wal.startCacheFlush(regionEncodedName, flushedFamilyNames);
@@ -254,7 +254,6 @@ public class TestFSHLogProvider {
* eligible for archiving if for all the regions which have entries in that wal file, have flushed
* - past their maximum sequence id in that wal file.
*
- * n
*/
@Test
public void testWALArchiving() throws IOException {
@@ -329,7 +328,7 @@ public class TestFSHLogProvider {
}
/**
- * Write to a log file with three concurrent threads and verifying all data is written. n
+ * Write to a log file with three concurrent threads and verifying all data is written.
*/
@Test
public void testConcurrentWrites() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java
index c2f92eba666..f2f73c37c6f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java
@@ -175,7 +175,7 @@ public class TestWALFactory {
}
/**
- * Just write multiple logs then split. Before fix for HADOOP-2283, this would fail. n
+ * Just write multiple logs then split. Before fix for HADOOP-2283, this would fail.
*/
@Test
public void testSplit() throws IOException {
@@ -231,7 +231,7 @@ public class TestWALFactory {
}
/**
- * Test new HDFS-265 sync. n
+ * Test new HDFS-265 sync.
*/
@Test
public void Broken_testSync() throws Exception {
@@ -604,7 +604,7 @@ public class TestWALFactory {
}
/**
- * Test that we can visit entries before they are appended n
+ * Test that we can visit entries before they are appended
*/
@Test
public void testVisitors() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALMethods.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALMethods.java
index 26790e57dc2..8273b3d6041 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALMethods.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALMethods.java
@@ -79,7 +79,7 @@ public class TestWALMethods {
/**
* Assert that getSplitEditFilesSorted returns files in expected order and that it skips
- * moved-aside files. n
+ * moved-aside files.
*/
@Test
public void testGetSplitEditFilesSorted() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALOpenAfterDNRollingStart.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALOpenAfterDNRollingStart.java
index 312c3ff6362..8ea47ebd828 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALOpenAfterDNRollingStart.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALOpenAfterDNRollingStart.java
@@ -98,7 +98,7 @@ public class TestWALOpenAfterDNRollingStart {
* all datanode restarted (rolling upgrade, for example). Before this patch, low replication
* detection is only used when syncing wal. But if the wal haven't had any entry whiten, it will
* never know all the replica of the wal is broken(because of dn restarting). And this wal can
- * never be open n
+ * never be open
*/
@Test
public void test() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
index 46055cea66b..fedfaffca5b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
@@ -955,8 +955,8 @@ public class TestWALSplit {
/**
* Sets up a log splitter with a mock reader and writer. The mock reader generates a specified
* number of edits spread across 5 regions. The mock writer optionally sleeps for each edit it is
- * fed. * After the split is complete, verifies that the statistics show the correct number of
- * edits output into each region.
+ * fed. After the split is complete, verifies that the statistics show the correct number of edits
+ * output into each region.
* @param numFakeEdits number of fake edits to push through pipeline
* @param bufferSize size of in-memory buffer
* @param writerSlowness writer threads will sleep this many ms per edit
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
index 8dab08d9132..f975f066e26 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
@@ -372,7 +372,8 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
/**
* Verify the content of the WAL file. Verify that the file has expected number of edits.
- * @param wals may not be null n * @return Count of edits. n
+ * @param wals may not be null
+ * @return Count of edits.
*/
private long verify(final WALFactory wals, final Path wal, final boolean verbose)
throws IOException {
@@ -536,7 +537,7 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
/**
* The guts of the {@link #main} method. Call this method to avoid the {@link #main(String[])}
- * System.exit. nnn
+ * System.exit.
*/
static int innerMain(final Configuration c, final String[] args) throws Exception {
return ToolRunner.run(c, new WALPerformanceEvaluation(), args);
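The javadoc above points callers at innerMain to avoid System.exit; a minimal sketch of doing so from test code in the same package (innerMain is package-private), with command-line flags that are illustrative assumptions only:

  Configuration conf = HBaseConfiguration.create();
  // flags are assumptions for illustration; check the tool's printed usage for real options
  int exitCode = WALPerformanceEvaluation.innerMain(conf, new String[] { "-threads", "2" });
  // a non-zero exit code indicates the evaluation failed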
diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java
index c33193a36c4..27f3dd4f43a 100644
--- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java
+++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java
@@ -373,7 +373,7 @@ public abstract class HBaseCluster implements Closeable, Configurable {
public abstract void close() throws IOException;
/**
- * Wait for the namenode. n
+ * Wait for the namenode.
*/
public void waitForNamenodeAvailable() throws InterruptedException {
}
diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index d3f5a51dc4a..9b4fecf41f9 100644
--- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -495,7 +495,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
/**
* Cleans the test data directory on the test filesystem.
- * @return True if we removed the test dirs n
+ * @return True if we removed the test dirs
*/
public boolean cleanupDataTestDirOnTestFS() throws IOException {
boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
@@ -505,7 +505,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
/**
* Cleans a subdirectory under the test data directory on the test filesystem.
- * @return True if we removed child n
+ * @return True if we removed child
*/
public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
Path cpath = getDataTestDirOnTestFS(subdirName);
@@ -564,7 +564,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
/**
* Start a minidfscluster.
- * @param servers How many DNs to start. n * @see #shutdownMiniDFSCluster()
+ * @param servers How many DNs to start.
+ * @see #shutdownMiniDFSCluster()
* @return The mini dfs cluster created.
*/
public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
@@ -575,7 +576,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* Start a minidfscluster. This is useful if you want to run datanode on distinct hosts for things
* like HDFS block location verification. If you start MiniDFSCluster without host names, all
* instances of the datanodes will have the same host name.
- * @param hosts hostnames DNs to run on. n * @see #shutdownMiniDFSCluster()
+ * @param hosts hostnames DNs to run on.
+ * @see #shutdownMiniDFSCluster()
* @return The mini dfs cluster created.
*/
public MiniDFSCluster startMiniDFSCluster(final String hosts[]) throws Exception {
@@ -589,7 +591,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
/**
* Start a minidfscluster. Can only create one.
* @param servers How many DNs to start.
- * @param hosts hostnames DNs to run on. n * @see #shutdownMiniDFSCluster()
+ * @param hosts hostnames DNs to run on.
+ * @see #shutdownMiniDFSCluster()
* @return The mini dfs cluster created.
*/
public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[]) throws Exception {
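A hedged fragment for the mini-DFS starters documented above, assuming util is an HBaseTestingUtility; only one mini-DFS cluster can exist at a time:

  MiniDFSCluster dfs = util.startMiniDFSCluster(3);   // three DataNodes, arbitrary hostnames
  // ... exercise HDFS-dependent code against dfs.getFileSystem() ...
  util.shutdownMiniDFSCluster();                      // no-op if nothing was started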
@@ -762,7 +765,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
}
/**
- * Shuts down instance created by call to {@link #startMiniDFSCluster(int)} or does nothing. n
+ * Shuts down instance created by call to {@link #startMiniDFSCluster(int)} or does nothing.
*/
public void shutdownMiniDFSCluster() throws IOException {
if (this.dfsCluster != null) {
@@ -1332,7 +1335,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* Returns the path to the default root dir the minicluster uses. If create is true,
* a new root directory path is fetched irrespective of whether it has been fetched before or not.
* If false, previous path is used. Note: this does not cause the root dir to be created.
- * @return Fully qualified path for the default hbase root dir n
+ * @return Fully qualified path for the default hbase root dir
*/
public Path getDefaultRootDirPath(boolean create) throws IOException {
if (!create) {
@@ -1345,7 +1348,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
/**
* Same as {{@link HBaseTestingUtility#getDefaultRootDirPath(boolean create)} except that
* create flag is false. Note: this does not cause the root dir to be created.
- * @return Fully qualified path for the default hbase root dir n
+ * @return Fully qualified path for the default hbase root dir
*/
public Path getDefaultRootDirPath() throws IOException {
return getDefaultRootDirPath(false);
@@ -1358,7 +1361,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* @param create This flag decides whether to get a new root or data directory path or not, if it
* has been fetched already. Note : Directory will be made irrespective of whether
* path has been fetched or not. If directory already exists, it will be overwritten
- * @return Fully qualified path to hbase root dir n
+ * @return Fully qualified path to hbase root dir
*/
public Path createRootDir(boolean create) throws IOException {
FileSystem fs = FileSystem.get(this.conf);
@@ -1372,7 +1375,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
/**
* Same as {@link HBaseTestingUtility#createRootDir(boolean create)} except that
* create flag is false.
- * @return Fully qualified path to hbase root dir n
+ * @return Fully qualified path to hbase root dir
*/
public Path createRootDir() throws IOException {
return createRootDir(false);
@@ -1382,7 +1385,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* Creates a hbase walDir in the user's home directory. Normally you won't make use of this
* method. Root hbaseWALDir is created for you as part of mini cluster startup. You'd only use
* this method if you were doing manual operation.
- * @return Fully qualified path to hbase root dir n
+ * @return Fully qualified path to hbase root dir
*/
public Path createWALRootDir() throws IOException {
FileSystem fs = FileSystem.get(this.conf);
@@ -1431,14 +1434,16 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
}
/**
- * Create a table. nn * @return A Table instance for the created table. n
+ * Create a table.
+ * @return A Table instance for the created table.
*/
public Table createTable(TableName tableName, String family) throws IOException {
return createTable(tableName, new String[] { family });
}
/**
- * Create a table. nn * @return A Table instance for the created table. n
+ * Create a table.
+ * @return A Table instance for the created table.
*/
public Table createTable(TableName tableName, String[] families) throws IOException {
List<byte[]> fams = new ArrayList<>(families.length);
@@ -1449,14 +1454,16 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
}
/**
- * Create a table. nn * @return A Table instance for the created table. n
+ * Create a table.
+ * @return A Table instance for the created table.
*/
public Table createTable(TableName tableName, byte[] family) throws IOException {
return createTable(tableName, new byte[][] { family });
}
/**
- * Create a table with multiple regions. nnn * @return A Table instance for the created table. n
+ * Create a table with multiple regions.
+ * @return A Table instance for the created table.
*/
public Table createMultiRegionTable(TableName tableName, byte[] family, int numRegions)
throws IOException {
@@ -1469,22 +1476,25 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
}
/**
- * Create a table. nn * @return A Table instance for the created table. n
+ * Create a table.
+ * @return A Table instance for the created table.
*/
public Table createTable(TableName tableName, byte[][] families) throws IOException {
return createTable(tableName, families, (byte[][]) null);
}
/**
- * Create a table with multiple regions. nn * @return A Table instance for the created table. n
+ * Create a table with multiple regions.
+ * @return A Table instance for the created table.
*/
public Table createMultiRegionTable(TableName tableName, byte[][] families) throws IOException {
return createTable(tableName, families, KEYS_FOR_HBA_CREATE_TABLE);
}
/**
- * Create a table with multiple regions. n * @param replicaCount replica count. n * @return A
- * Table instance for the created table. n
+ * Create a table with multiple regions.
+ * @param replicaCount replica count.
+ * @return A Table instance for the created table.
*/
public Table createMultiRegionTable(TableName tableName, int replicaCount, byte[][] families)
throws IOException {
@@ -1492,7 +1502,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
}
/**
- * Create a table. nnn * @return A Table instance for the created table. n
+ * Create a table.
+ * @return A Table instance for the created table.
*/
public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys)
throws IOException {
@@ -1591,7 +1602,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* Create a table.
* @param htd table descriptor
* @param splitRows array of split keys
- * @return A Table instance for the created table. n
+ * @return A Table instance for the created table.
*/
public Table createTable(TableDescriptor htd, byte[][] splitRows) throws IOException {
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
@@ -1928,7 +1939,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* @param conf configuration
* @param desc table descriptor
* @param wal wal for this region.
- * @return created hregion n
+ * @return created hregion
*/
public HRegion createLocalHRegion(RegionInfo info, Configuration conf, TableDescriptor desc,
WAL wal) throws IOException {
@@ -1936,8 +1947,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
}
/**
- * nnnnn * @return A region on which you must call
- * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done. n
+ * Return a region on which you must call {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)}
+ * when done.
*/
public HRegion createLocalHRegion(TableName tableName, byte[] startKey, byte[] stopKey,
Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... families)
@@ -1978,7 +1989,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* Provide an existing table name to truncate. Scans the table and issues a delete for each row
* read.
* @param tableName existing table
- * @return HTable to that new table n
+ * @return HTable to that new table
*/
public Table deleteTableData(TableName tableName) throws IOException {
Table table = getConnection().getTable(tableName);
@@ -2025,7 +2036,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* Load table with rows from 'aaa' to 'zzz'.
* @param t Table
* @param f Family
- * @return Count of rows loaded. n
+ * @return Count of rows loaded.
*/
public int loadTable(final Table t, final byte[] f) throws IOException {
return loadTable(t, new byte[][] { f });
@@ -2035,7 +2046,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* Load table with rows from 'aaa' to 'zzz'.
* @param t Table
* @param f Family
- * @return Count of rows loaded. n
+ * @return Count of rows loaded.
*/
public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws IOException {
return loadTable(t, new byte[][] { f }, null, writeToWAL);
@@ -2045,7 +2056,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* Load table of multiple column families with rows from 'aaa' to 'zzz'.
* @param t Table
* @param f Array of Families to load
- * @return Count of rows loaded. n
+ * @return Count of rows loaded.
*/
public int loadTable(final Table t, final byte[][] f) throws IOException {
return loadTable(t, f, null);
@@ -2056,7 +2067,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* @param t Table
* @param f Array of Families to load
* @param value the values of the cells. If null is passed, the row key is used as value
- * @return Count of rows loaded. n
+ * @return Count of rows loaded.
*/
public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOException {
return loadTable(t, f, value, true);
@@ -2067,7 +2078,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* @param t Table
* @param f Array of Families to load
* @param value the values of the cells. If null is passed, the row key is used as value
- * @return Count of rows loaded. n
+ * @return Count of rows loaded.
*/
public int loadTable(final Table t, final byte[][] f, byte[] value, boolean writeToWAL)
throws IOException {
@@ -2154,7 +2165,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* @param r Region
* @param f Family
* @param flush flush the cache if true
- * @return Count of rows loaded. n
+ * @return Count of rows loaded.
*/
public int loadRegion(final HRegion r, final byte[] f, final boolean flush) throws IOException {
byte[] k = new byte[3];
@@ -2403,7 +2414,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
/**
* Create rows in hbase:meta for regions of the specified table with the specified start keys. The
* first startKey should be a 0 length byte array if you want to form a proper range of regions.
- * nnn * @return list of region info for regions added to meta n
+ * @return list of region info for regions added to meta
*/
public List<RegionInfo> createMultiRegionsInMeta(final Configuration conf,
final TableDescriptor htd, byte[][] startKeys) throws IOException {
@@ -2540,9 +2551,9 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
}
}
- /*
- * Find any other region server which is different from the one identified by parameter n
- * * @return another region server
+ /**
+ * Find any other region server which is different from the one identified by parameter
+ * @return another region server
*/
public HRegionServer getOtherRegionServer(HRegionServer rs) {
for (JVMClusterUtil.RegionServerThread rst : getMiniHBaseCluster().getRegionServerThreads()) {
@@ -2557,7 +2568,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* Tool to get the reference to the region server object that holds the region of the specified
* user table.
* @param tableName user table to lookup in hbase:meta
- * @return region server that holds it, null if the row doesn't exist nn
+ * @return region server that holds it, null if the row doesn't exist
*/
public HRegionServer getRSForFirstRegionInTable(TableName tableName)
throws IOException, InterruptedException {
@@ -2750,7 +2761,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
}
/**
- * Expire the Master's session n
+ * Expire the Master's session
*/
public void expireMasterSession() throws Exception {
HMaster master = getMiniHBaseCluster().getMaster();
@@ -2992,7 +3003,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
/**
* Closes the region containing the given row.
* @param row The row to find the containing region.
- * @param table The table to find the region. n
+ * @param table The table to find the region.
*/
public void unassignRegionByRow(byte[] row, RegionLocator table) throws IOException {
HRegionLocation hrl = table.getRegionLocation(row);
@@ -3069,7 +3080,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
/**
* Wait until all regions in a table have been assigned. Waits default timeout before giving up
* (30 seconds).
- * @param table Table to wait on. nn
+ * @param table Table to wait on.
*/
public void waitTableAvailable(TableName table) throws InterruptedException, IOException {
waitTableAvailable(table.getName(), 30000);
@@ -3166,7 +3177,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* have been all assigned.
* @see #waitTableEnabled(TableName, long)
* @param table Table to wait on.
- * @param timeoutMillis Time to wait on it being marked enabled. nn
+ * @param timeoutMillis Time to wait on it being marked enabled.
*/
public void waitTableEnabled(byte[] table, long timeoutMillis)
throws InterruptedException, IOException {
@@ -3180,7 +3191,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
/**
* Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled' Will timeout
* after default period (30 seconds)
- * @param table Table to wait on. nn
+ * @param table Table to wait on.
*/
public void waitTableDisabled(byte[] table) throws InterruptedException, IOException {
waitTableDisabled(table, 30000);
@@ -3194,7 +3205,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
/**
* Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled'
* @param table Table to wait on.
- * @param timeoutMillis Time to wait on it being marked disabled. nn
+ * @param timeoutMillis Time to wait on it being marked disabled.
*/
public void waitTableDisabled(byte[] table, long timeoutMillis)
throws InterruptedException, IOException {
@@ -3204,7 +3215,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
/**
* Make sure that at least the specified number of region servers are running
* @param num minimum number of region servers that should be running
- * @return true if we started some servers n
+ * @return true if we started some servers
*/
public boolean ensureSomeRegionServersAvailable(final int num) throws IOException {
boolean startedServer = false;
@@ -3221,7 +3232,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* Make sure that at least the specified number of region servers are running. We don't count the
* ones that are currently stopping or are stopped.
* @param num minimum number of region servers that should be running
- * @return true if we started some servers n
+ * @return true if we started some servers
*/
public boolean ensureSomeNonStoppedRegionServersAvailable(final int num) throws IOException {
boolean startedServer = ensureSomeRegionServersAvailable(num);
@@ -3248,7 +3259,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* Use it getting new instances of FileSystem. Only works for DistributedFileSystem w/o Kerberos.
* @param c Initial configuration
* @param differentiatingSuffix Suffix to differentiate this user from others.
- * @return A new configuration instance with a different user set into it. n
+ * @return A new configuration instance with a different user set into it.
*/
public static User getDifferentUser(final Configuration c, final String differentiatingSuffix)
throws IOException {
@@ -3290,7 +3301,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* failed 4 times. Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
*
*
- * @param stream A DFSClient.DFSOutputStream. nnnnn
+ * @param stream A DFSClient.DFSOutputStream.
*/
public static void setMaxRecoveryErrorCount(final OutputStream stream, final int max) {
try {
@@ -3327,7 +3338,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
/**
* Move region to destination server and wait till region is completely moved and online
* @param destRegion region to move
- * @param destServer destination server of the region nn
+ * @param destServer destination server of the region
*/
public void moveRegionAndWait(RegionInfo destRegion, ServerName destServer)
throws InterruptedException, IOException {
@@ -3349,7 +3360,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* Wait until all regions for a table in hbase:meta have a non-empty info:server, up to a
* configuable timeout value (default is 60 seconds) This means all regions have been deployed,
* master has been informed and updated hbase:meta with the regions deployed server.
- * @param tableName the table name n
+ * @param tableName the table name
*/
public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
waitUntilAllRegionsAssigned(tableName,
@@ -3357,7 +3368,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
}
/**
- * Waith until all system table's regions get assigned n
+ * Wait until all system table's regions get assigned
*/
public void waitUntilAllSystemRegionsAssigned() throws IOException {
waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
@@ -3368,7 +3379,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* timeout. This means all regions have been deployed, master has been informed and updated
* hbase:meta with the regions deployed server.
* @param tableName the table name
- * @param timeout timeout, in milliseconds n
+ * @param timeout timeout, in milliseconds
*/
public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
throws IOException {
@@ -3477,8 +3488,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
}
/**
- * Create region split keys between startkey and endKey nn * @param numRegions the number of
- * regions to be created. it has to be greater than 3.
+ * Create region split keys between startkey and endKey
+ * @param numRegions the number of regions to be created. it has to be greater than 3.
* @return resulting split keys
*/
public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
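Illustrative fragment only, assuming util is an HBaseTestingUtility: derive split start keys for a ten-region keyspace (the method requires numRegions greater than 3):

  byte[][] startKeys = util.getRegionSplitStartKeys(Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 10);
  // the first entry is typically the empty start key; the rest partition the [aaa, zzz) range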
@@ -3957,21 +3968,21 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
/**
* Wait until no regions in transition.
- * @param timeout How long to wait. n
+ * @param timeout How long to wait.
*/
public void waitUntilNoRegionsInTransition(final long timeout) throws IOException {
waitFor(timeout, predicateNoRegionsInTransition());
}
/**
- * Wait until no regions in transition. (time limit 15min) n
+ * Wait until no regions in transition. (time limit 15min)
*/
public void waitUntilNoRegionsInTransition() throws IOException {
waitUntilNoRegionsInTransition(15 * 60000);
}
/**
- * Wait until labels is ready in VisibilityLabelsCache. nn
+ * Wait until labels is ready in VisibilityLabelsCache.
*/
public void waitLabelAvailable(long timeoutMillis, final String... labels) {
final VisibilityLabelsCache labelsCache = VisibilityLabelsCache.get();
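Taken together, the HBaseTestingUtility methods touched in this file form the usual mini-cluster test flow. A hedged, self-contained sketch (class and table names are illustrative, not a prescribed pattern):

  import org.apache.hadoop.hbase.HBaseTestingUtility;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  public class ExampleMiniClusterFlow {
    public static void main(String[] args) throws Exception {
      HBaseTestingUtility util = new HBaseTestingUtility();
      util.startMiniCluster();                                  // DFS + ZK + HBase
      try {
        TableName tn = TableName.valueOf("exampleTable");       // illustrative table name
        Table table = util.createTable(tn, Bytes.toBytes("cf"));
        int rows = util.loadTable(table, Bytes.toBytes("cf"));  // rows 'aaa' .. 'zzz'
        util.waitTableAvailable(tn);                            // all regions assigned
        System.out.println("loaded " + rows + " rows");
        util.deleteTableData(tn);                               // truncate via per-row deletes
      } finally {
        util.cleanupDataTestDirOnTestFS();                      // clean test dirs on the test FS
        util.shutdownMiniCluster();
      }
    }
  }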
diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
index 4a98bb31e16..42036fbc79b 100644
--- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
+++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
@@ -62,7 +62,7 @@ public class MiniHBaseCluster extends HBaseCluster {
/**
* Start a MiniHBaseCluster.
* @param conf Configuration to be used for cluster
- * @param numRegionServers initial number of region servers to start. n
+ * @param numRegionServers initial number of region servers to start.
*/
public MiniHBaseCluster(Configuration conf, int numRegionServers)
throws IOException, InterruptedException {
@@ -73,7 +73,7 @@ public class MiniHBaseCluster extends HBaseCluster {
* Start a MiniHBaseCluster.
* @param conf Configuration to be used for cluster
* @param numMasters initial number of masters to start.
- * @param numRegionServers initial number of region servers to start. n
+ * @param numRegionServers initial number of region servers to start.
*/
public MiniHBaseCluster(Configuration conf, int numMasters, int numRegionServers)
throws IOException, InterruptedException {
@@ -97,7 +97,7 @@ public class MiniHBaseCluster extends HBaseCluster {
* @param rsPorts Ports that RegionServer should use; pass ports if you want to test cluster
* restart where for sure the regionservers come up on same address+port (but just
* with different startcode); by default mini hbase clusters choose new arbitrary
- * ports on each cluster start. nn
+ * ports on each cluster start.
*/
public MiniHBaseCluster(Configuration conf, int numMasters, int numAlwaysStandByMasters,
int numRegionServers, List<Integer> rsPorts, Class<? extends HMaster> masterClass,
@@ -139,9 +139,9 @@ public class MiniHBaseCluster extends HBaseCluster {
}
/*
- * n * @param currentfs We return this if we did not make a new one.
+ * @param currentfs We return this if we did not make a new one.
* @param uniqueName Same name used to help identify the created fs.
- * @return A new fs instance if we are up on DistributeFileSystem. n
+ * @return A new fs instance if we are up on DistributeFileSystem.
*/
@Override
@@ -400,7 +400,8 @@ public class MiniHBaseCluster extends HBaseCluster {
}
/**
- * Starts a region server thread running n * @return New RegionServerThread
+ * Starts a region server thread running
+ * @return New RegionServerThread
*/
public JVMClusterUtil.RegionServerThread startRegionServer() throws IOException {
final Configuration newConf = HBaseConfiguration.create(conf);
@@ -510,7 +511,7 @@ public class MiniHBaseCluster extends HBaseCluster {
/**
* Wait for the specified region server to stop. Removes this thread from list of running threads.
- * n * @return Name of region server that just went down.
+ * @return Name of region server that just went down.
*/
public String waitOnRegionServer(final int serverNumber) {
return this.hbaseCluster.waitOnRegionServer(serverNumber);
@@ -604,8 +605,8 @@ public class MiniHBaseCluster extends HBaseCluster {
}
/**
- * Wait for the specified master to stop. Removes this thread from list of running threads. n
- * * @return Name of master that just went down.
+ * Wait for the specified master to stop. Removes this thread from list of running threads.
+ * @return Name of master that just went down.
*/
public String waitOnMaster(final int serverNumber) {
return this.hbaseCluster.waitOnMaster(serverNumber);
@@ -613,7 +614,7 @@ public class MiniHBaseCluster extends HBaseCluster {
/**
* Blocks until there is an active master and that master has completed initialization.
- * @return true if an active master becomes available. false if there are no masters left. n
+ * @return true if an active master becomes available. false if there are no masters left.
*/
@Override
public boolean waitForActiveAndReadyMaster(long timeout) throws IOException {
@@ -710,7 +711,7 @@ public class MiniHBaseCluster extends HBaseCluster {
}
/**
- * Call flushCache on all regions on all participating regionservers. n
+ * Call flushCache on all regions on all participating regionservers.
*/
public void compact(boolean major) throws IOException {
for (JVMClusterUtil.RegionServerThread t : this.hbaseCluster.getRegionServers()) {
@@ -723,7 +724,7 @@ public class MiniHBaseCluster extends HBaseCluster {
}
/**
- * Call flushCache on all regions of the specified table. n
+ * Call flushCache on all regions of the specified table.
*/
public void compact(TableName tableName, boolean major) throws IOException {
for (JVMClusterUtil.RegionServerThread t : this.hbaseCluster.getRegionServers()) {
@@ -756,7 +757,8 @@ public class MiniHBaseCluster extends HBaseCluster {
}
/**
- * Grab a numbered region server of your choice. n * @return region server
+ * Grab a numbered region server of your choice.
+ * @return region server
*/
public HRegionServer getRegionServer(int serverNumber) {
return hbaseCluster.getRegionServer(serverNumber);
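A hedged fragment for the MiniHBaseCluster operations documented above; cluster would normally come from HBaseTestingUtility#getMiniHBaseCluster(), and the server index 0 is illustrative:

  MiniHBaseCluster cluster = util.getMiniHBaseCluster();
  JVMClusterUtil.RegionServerThread extraRs = cluster.startRegionServer(); // add one more RS
  HRegionServer rs = cluster.getRegionServer(0);                           // grab a numbered RS
  cluster.compact(false);                                                  // minor-compact all regions
  cluster.stopRegionServer(0);
  String stoppedName = cluster.waitOnRegionServer(0);                      // name of the downed RS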
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HBaseServiceHandler.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HBaseServiceHandler.java
index 93468dc4a43..79f5d4ee830 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HBaseServiceHandler.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HBaseServiceHandler.java
@@ -66,7 +66,7 @@ public abstract class HBaseServiceHandler {
}
/**
- * Creates and returns a Table instance from a given table name. n * name of table
+ * Creates and returns a Table instance from a given table name.
* @return Table object
* @throws IOException if getting the table fails
*/
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
index 38edd98ddd8..ba44113bfa5 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
@@ -55,8 +55,8 @@ public final class ThriftUtilities {
/**
* This utility method creates a new Hbase HColumnDescriptor object based on a Thrift
* ColumnDescriptor "struct".
- * @param in Thrift ColumnDescriptor object n * @throws IllegalArgument if the column name is
- * empty
+ * @param in Thrift ColumnDescriptor object
+ * @throws IllegalArgument if the column name is empty
*/
public static ColumnFamilyDescriptor colDescFromThrift(ColumnDescriptor in)
throws IllegalArgument {
@@ -76,7 +76,7 @@ public final class ThriftUtilities {
/**
* This utility method creates a new Thrift ColumnDescriptor "struct" based on an Hbase
- * HColumnDescriptor object. n * Hbase HColumnDescriptor object
+ * HColumnDescriptor object.
* @return Thrift ColumnDescriptor
*/
public static ColumnDescriptor colDescFromHbase(ColumnFamilyDescriptor in) {
@@ -93,7 +93,7 @@ public final class ThriftUtilities {
/**
* This utility method creates a list of Thrift TCell "struct" based on an Hbase Cell object. The
- * empty list is returned if the input is null. n * Hbase Cell object
+ * empty list is returned if the input is null.
* @return Thrift TCell array
*/
public static List<TCell> cellFromHBase(Cell in) {
@@ -125,10 +125,10 @@ public final class ThriftUtilities {
/**
* This utility method creates a list of Thrift TRowResult "struct" based on an Hbase RowResult
- * object. The empty list is returned if the input is null. n * Hbase RowResult object n * This
- * boolean dictates if row data is returned in a sorted order sortColumns = True will set
- * TRowResult's sortedColumns member which is an ArrayList of TColumn struct sortColumns = False
- * will set TRowResult's columns member which is a map of columnName and TCell struct
+ * object. The empty list is returned if the input is null. The sortColumns flag dictates whether
+ * row data is returned in sorted order: sortColumns = true sets TRowResult's sortedColumns member,
+ * an ArrayList of TColumn structs, while sortColumns = false sets TRowResult's columns member, a
+ * map of columnName to TCell struct.
* @return Thrift TRowResult array
*/
public static List<TRowResult> rowResultFromHBase(Result[] in, boolean sortColumns) {
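A two-line fragment for the sortColumns behaviour described above; results is a hypothetical Result[] obtained from a scan:

  List<TRowResult> sorted = ThriftUtilities.rowResultFromHBase(results, true);  // fills sortedColumns
  List<TRowResult> byName = ThriftUtilities.rowResultFromHBase(results, false); // fills the columns map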
@@ -167,8 +167,8 @@ public final class ThriftUtilities {
/**
* This utility method creates a list of Thrift TRowResult "struct" based on an array of Hbase
- * RowResult objects. The empty list is returned if the input is null. n * Array of Hbase
- * RowResult objects
+ * RowResult objects. The empty list is returned if the input is null.
+ * @param in array of Hbase RowResult objects
* @return Thrift TRowResult array
*/
public static List<TRowResult> rowResultFromHBase(Result[] in) {