HBASE-26922 Fix LineLength warnings as much as possible if it can not be fixed by spotless (#4324)

Signed-off-by: Yulin Niu <niuyulin@apache.org>
(cherry picked from commit 3ae0d9012c)
Duo Zhang 2022-04-09 21:38:41 +08:00
parent abb1bf2617
commit 913dd9c305
52 changed files with 431 additions and 402 deletions
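Most hunks below apply a few mechanical patterns to bring lines under the Checkstyle LineLength limit (100 characters in HBase): a trailing end-of-line comment moves onto its own line above the statement, an over-long string literal is split into concatenated pieces, and long fully qualified javadoc {@link} targets are shortened to simple names. A minimal sketch of the first two patterns, using hypothetical names rather than code copied from the commit:

public class LineLengthFixExamples {

  // Before the fix the comment trailed the value ("= 64 * 1024; // 64KB"),
  // pushing the wrapped declaration past the limit; moving it above the
  // statement lets the declaration fit on a single line.
  // 64KB
  private static final long FLUSH_CHECK_MIN_DATA_LENGTH = 64 * 1024;

  byte[] nextRow(java.util.Deque<byte[]> nextRows) {
    if (nextRows.isEmpty()) {
      // The long message is split across two source lines; the compiler folds
      // the concatenation of constant strings back into a single literal.
      throw new IllegalStateException("NextRows should not be empty, "
        + "make sure to call nextRow() after updateTracker() return true");
    }
    return nextRows.peek();
  }
}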


@ -69,8 +69,8 @@ public class StreamSlowMonitor implements ConfigurationObserver {
*/
private static final String DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY =
"hbase.regionserver.async.wal.datanode.slow.check.speed.packet.data.length.min";
private static final long DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH =
64 * 1024; //64KB
// 64KB
private static final long DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH = 64 * 1024;
/**
* Configure for the slow packet process time, a duration from send to ACK.
@ -79,7 +79,8 @@ public class StreamSlowMonitor implements ConfigurationObserver {
*/
public static final String DATANODE_SLOW_PACKET_PROCESS_TIME_KEY =
"hbase.regionserver.async.wal.datanode.slow.packet.process.time.millis";
private static final long DEFAULT_DATANODE_SLOW_PACKET_PROCESS_TIME = 6000; // 6s in ms
// 6s in ms
private static final long DEFAULT_DATANODE_SLOW_PACKET_PROCESS_TIME = 6000;
/**
* Configure for the check of large packet(which is configured by
@ -89,7 +90,8 @@ public class StreamSlowMonitor implements ConfigurationObserver {
*/
private static final String DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED_KEY =
"hbase.regionserver.async.wal.datanode.slow.packet.speed.min.kbs";
private static final double DEFAULT_DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED = 20; // 20KB/s
// 20KB/s
private static final double DEFAULT_DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED = 20;
private final String name;
// this is a map of datanodeInfo->queued slow PacketAckData


@ -103,8 +103,8 @@ class CatalogReplicaLoadBalanceSimpleSelector implements
}
}
private final ConcurrentMap<TableName, ConcurrentNavigableMap<byte[], StaleLocationCacheEntry>>
staleCache = new ConcurrentHashMap<>();
private final ConcurrentMap<TableName,
ConcurrentNavigableMap<byte[], StaleLocationCacheEntry>> staleCache = new ConcurrentHashMap<>();
private volatile int numOfReplicas;
private final ChoreService choreService;
private final TableName tableName;


@ -101,7 +101,6 @@ public class ColumnCountGetFilter extends FilterBase {
/**
* @param pbBytes A pb serialized {@link ColumnCountGetFilter} instance
* @return An instance of {@link ColumnCountGetFilter} made from <code>bytes</code>
* @throws org.apache.hadoop.hbase.exceptions.DeserializationException
* @see #toByteArray
*/
public static ColumnCountGetFilter parseFrom(final byte [] pbBytes)


@ -241,8 +241,8 @@ public class FuzzyRowFilter extends FilterBase {
byte[] nextRow() {
if (nextRows.isEmpty()) {
throw new IllegalStateException(
"NextRows should not be empty, make sure to call nextRow() after updateTracker() return true");
throw new IllegalStateException("NextRows should not be empty, "
+ "make sure to call nextRow() after updateTracker() return true");
} else {
return nextRows.peek().getFirst();
}


@ -399,7 +399,6 @@ public class SingleColumnValueFilter extends FilterBase {
/**
* @param pbBytes A pb serialized {@link SingleColumnValueFilter} instance
* @return An instance of {@link SingleColumnValueFilter} made from <code>bytes</code>
* @throws org.apache.hadoop.hbase.exceptions.DeserializationException
* @see #toByteArray
*/
public static SingleColumnValueFilter parseFrom(final byte [] pbBytes)


@ -2958,9 +2958,7 @@ public final class ProtobufUtil {
}
/**
* Creates {@link CompactionState} from
* {@link org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState}
* state
* Creates {@link CompactionState} from {@link GetRegionInfoResponse.CompactionState} state
* @param state the protobuf CompactionState
* @return CompactionState
*/
@ -2973,9 +2971,7 @@ public final class ProtobufUtil {
}
/**
* Creates {@link CompactionState} from
* {@link org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos
* .RegionLoad.CompactionState} state
* Creates {@link CompactionState} from {@link RegionLoad.CompactionState} state
* @param state the protobuf CompactionState
* @return CompactionState
*/
@ -2995,8 +2991,7 @@ public final class ProtobufUtil {
}
/**
* Creates {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type}
* from {@link SnapshotType}
* Creates {@link SnapshotProtos.SnapshotDescription.Type} from {@link SnapshotType}
* @param type the SnapshotDescription type
* @return the protobuf SnapshotDescription type
*/
@ -3006,8 +3001,8 @@ public final class ProtobufUtil {
}
/**
* Creates {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type}
* from the type of SnapshotDescription string
* Creates {@link SnapshotProtos.SnapshotDescription.Type} from the type of SnapshotDescription
* string
* @param snapshotDesc string representing the snapshot description type
* @return the protobuf SnapshotDescription type
*/
@ -3017,18 +3012,16 @@ public final class ProtobufUtil {
}
/**
* Creates {@link SnapshotType} from the type of
* {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription}
* @param type the snapshot description type
* @return the protobuf SnapshotDescription type
* Creates {@link SnapshotType} from the {@link SnapshotProtos.SnapshotDescription.Type}
* @param type the snapshot description type
* @return the protobuf SnapshotDescription type
*/
public static SnapshotType createSnapshotType(SnapshotProtos.SnapshotDescription.Type type) {
return SnapshotType.valueOf(type.toString());
}
/**
* Convert from {@link SnapshotDescription} to
* {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription}
* Convert from {@link SnapshotDescription} to {@link SnapshotProtos.SnapshotDescription}
* @param snapshotDesc the POJO SnapshotDescription
* @return the protobuf SnapshotDescription
*/
@ -3062,9 +3055,7 @@ public final class ProtobufUtil {
}
/**
* Convert from
* {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription} to
* {@link SnapshotDescription}
* Convert from {@link SnapshotProtos.SnapshotDescription} to {@link SnapshotDescription}
* @param snapshotDesc the protobuf SnapshotDescription
* @return the POJO SnapshotDescription
*/
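Several javadoc-only hunks in this commit, like the ProtobufUtil ones above and the TokenUtil and mapreduce InputFormat ones below, shorten fully qualified {@link} targets to simple names that the class already imports, and drop bare @throws tags that carried no description; both changes pull the lines under the limit without changing which classes the links resolve to. A self-contained sketch of the idea, with stand-in nested types instead of the real generated protobuf classes:

public final class JavadocLinkExample {

  /** Stand-in for the imported, generated protobuf response class. */
  static final class GetRegionInfoResponse {
    enum CompactionState { NONE, MINOR, MAJOR, MAJOR_AND_MINOR }
  }

  /** Stand-in for the client-side CompactionState enum. */
  enum CompactionState { NONE, MINOR, MAJOR, MAJOR_AND_MINOR }

  /**
   * Creates {@link CompactionState} from {@link GetRegionInfoResponse.CompactionState} state.
   * Before the fix the second link spelled out the fully qualified generated-class name, which
   * pushed the line far past the limit; the simple name resolves because the class imports it.
   */
  static CompactionState createCompactionState(GetRegionInfoResponse.CompactionState state) {
    return CompactionState.valueOf(state.name());
  }

  private JavadocLinkExample() {
  }
}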


@ -38,9 +38,9 @@ import org.apache.yetus.audience.InterfaceStability;
* {@link BBKVComparator}. Use it when mostly {@link ByteBufferKeyValue}s.
* </p>
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings(
value="UNKNOWN",
justification="Findbugs doesn't like the way we are negating the result of a compare in below")
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "UNKNOWN",
justification = "Findbugs doesn't like the way we are negating the result of"
+ " a compare in below")
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class CellComparatorImpl implements CellComparator {


@ -73,13 +73,14 @@ public interface ExtendedCell extends RawCell, HeapSize {
}
/**
* @param withTags Whether to write tags.
* @return Bytes count required to serialize this Cell in a {@link KeyValue} format.
* <br> KeyValue format <br>
* KeyValue format
* <p/>
* <code>&lt;4 bytes keylength&gt; &lt;4 bytes valuelength&gt; &lt;2 bytes rowlength&gt;
* &lt;row&gt; &lt;1 byte columnfamilylength&gt; &lt;columnfamily&gt; &lt;columnqualifier&gt;
* &lt;8 bytes timestamp&gt; &lt;1 byte keytype&gt; &lt;value&gt; &lt;2 bytes tagslength&gt;
* &lt;tags&gt;</code>
* @param withTags Whether to write tags.
* @return Bytes count required to serialize this Cell in a {@link KeyValue} format.
*/
// TODO remove the boolean param once HBASE-16706 is done.
default int getSerializedSize(boolean withTags) {


@ -2369,21 +2369,23 @@ public class KeyValue implements ExtendedCell, Cloneable {
/**
* HeapSize implementation
*
* <p/>
* We do not count the bytes in the rowCache because it should be empty for a KeyValue in the
* MemStore.
*/
@Override
public long heapSize() {
/*
* Deep object overhead for this KV consists of two parts. The first part is the KV object
* itself, while the second part is the backing byte[]. We will only count the array overhead
* from the byte[] only if this is the first KV in there.
*/
return ClassSize.align(FIXED_OVERHEAD) +
(offset == 0
? ClassSize.sizeOfByteArray(length) // count both length and object overhead
: length); // only count the number of bytes
// Deep object overhead for this KV consists of two parts. The first part is the KV object
// itself, while the second part is the backing byte[]. We will only count the array overhead
// from the byte[] only if this is the first KV in there.
int fixed = ClassSize.align(FIXED_OVERHEAD);
if (offset == 0) {
// count both length and object overhead
return fixed + ClassSize.sizeOfByteArray(length);
} else {
// only count the number of bytes
return fixed + length;
}
}
/**


@ -1589,11 +1589,13 @@ public final class PrivateCellUtil {
}
private static class FirstOnRowCell extends EmptyCell {
// @formatter:off
private static final int FIXED_HEAPSIZE =
ClassSize.OBJECT // object
+ ClassSize.REFERENCE // row array
+ Bytes.SIZEOF_INT // row offset
+ Bytes.SIZEOF_SHORT; // row length
// @formatter:on
private final byte[] rowArray;
private final int roffset;
private final short rlength;
@ -1643,11 +1645,13 @@ public final class PrivateCellUtil {
}
private static class FirstOnRowByteBufferExtendedCell extends EmptyByteBufferExtendedCell {
// @formatter:off
private static final int FIXED_OVERHEAD =
ClassSize.OBJECT // object
+ ClassSize.REFERENCE // row buffer
+ Bytes.SIZEOF_INT // row offset
+ Bytes.SIZEOF_SHORT; // row length
// @formatter:on
private final ByteBuffer rowBuff;
private final int roffset;
private final short rlength;
@ -1698,11 +1702,12 @@ public final class PrivateCellUtil {
}
private static class LastOnRowByteBufferExtendedCell extends EmptyByteBufferExtendedCell {
private static final int FIXED_OVERHEAD =
ClassSize.OBJECT // object
// @formatter:off
private static final int FIXED_OVERHEAD = ClassSize.OBJECT // object
+ ClassSize.REFERENCE // rowBuff
+ Bytes.SIZEOF_INT // roffset
+ Bytes.SIZEOF_SHORT; // rlength
// @formatter:on
private final ByteBuffer rowBuff;
private final int roffset;
private final short rlength;
@ -1754,11 +1759,12 @@ public final class PrivateCellUtil {
private static class FirstOnRowColByteBufferExtendedCell
extends FirstOnRowByteBufferExtendedCell {
private static final int FIXED_OVERHEAD =
FirstOnRowByteBufferExtendedCell.FIXED_OVERHEAD
+ ClassSize.REFERENCE * 2 // family buffer and column buffer
+ Bytes.SIZEOF_INT * 3 // famOffset, colOffset, colLength
+ Bytes.SIZEOF_BYTE; // famLength
// @formatter:off
private static final int FIXED_OVERHEAD = FirstOnRowByteBufferExtendedCell.FIXED_OVERHEAD
+ ClassSize.REFERENCE * 2 // family buffer and column buffer
+ Bytes.SIZEOF_INT * 3 // famOffset, colOffset, colLength
+ Bytes.SIZEOF_BYTE; // famLength
// @formatter:on
private final ByteBuffer famBuff;
private final int famOffset;
private final byte famLength;
@ -1823,11 +1829,12 @@ public final class PrivateCellUtil {
}
private static class FirstOnRowColCell extends FirstOnRowCell {
private static final long FIXED_HEAPSIZE =
FirstOnRowCell.FIXED_HEAPSIZE
// @formatter:off
private static final long FIXED_HEAPSIZE = FirstOnRowCell.FIXED_HEAPSIZE
+ Bytes.SIZEOF_BYTE // flength
+ Bytes.SIZEOF_INT * 3 // foffset, qoffset, qlength
+ ClassSize.REFERENCE * 2; // fArray, qArray
// @formatter:on
private final byte[] fArray;
private final int foffset;
private final byte flength;
@ -1886,10 +1893,11 @@ public final class PrivateCellUtil {
}
private static class FirstOnRowColTSCell extends FirstOnRowColCell {
private static final long FIXED_HEAPSIZE =
FirstOnRowColCell.FIXED_HEAPSIZE
+ Bytes.SIZEOF_LONG; // ts
// @formatter:off
private static final long FIXED_HEAPSIZE = FirstOnRowColCell.FIXED_HEAPSIZE
+ Bytes.SIZEOF_LONG; // ts
private long ts;
// @formatter:on
public FirstOnRowColTSCell(byte[] rArray, int roffset, short rlength, byte[] fArray,
int foffset, byte flength, byte[] qArray, int qoffset, int qlength, long ts) {
@ -1910,10 +1918,11 @@ public final class PrivateCellUtil {
private static class FirstOnRowColTSByteBufferExtendedCell
extends FirstOnRowColByteBufferExtendedCell {
private static final int FIXED_OVERHEAD =
FirstOnRowColByteBufferExtendedCell.FIXED_OVERHEAD
+ Bytes.SIZEOF_LONG; // ts
// @formatter:off
private static final int FIXED_OVERHEAD = FirstOnRowColByteBufferExtendedCell.FIXED_OVERHEAD
+ Bytes.SIZEOF_LONG; // ts
private long ts;
// @formatter:on
public FirstOnRowColTSByteBufferExtendedCell(ByteBuffer rBuffer, int roffset, short rlength,
ByteBuffer fBuffer, int foffset, byte flength, ByteBuffer qBuffer, int qoffset, int qlength,
@ -1934,11 +1943,12 @@ public final class PrivateCellUtil {
}
private static class LastOnRowCell extends EmptyCell {
private static final int FIXED_OVERHEAD =
ClassSize.OBJECT // object
// @formatter:off
private static final int FIXED_OVERHEAD = ClassSize.OBJECT // object
+ ClassSize.REFERENCE // row array
+ Bytes.SIZEOF_INT // row offset
+ Bytes.SIZEOF_SHORT; // row length
// @formatter:on
private final byte[] rowArray;
private final int roffset;
private final short rlength;
@ -1988,10 +1998,12 @@ public final class PrivateCellUtil {
}
private static class LastOnRowColCell extends LastOnRowCell {
// @formatter:off
private static final long FIXED_OVERHEAD = LastOnRowCell.FIXED_OVERHEAD
+ ClassSize.REFERENCE * 2 // fArray and qArray
+ Bytes.SIZEOF_INT * 3 // foffset, qoffset, qlength
+ Bytes.SIZEOF_BYTE; // flength
+ ClassSize.REFERENCE * 2 // fArray and qArray
+ Bytes.SIZEOF_INT * 3 // foffset, qoffset, qlength
+ Bytes.SIZEOF_BYTE; // flength
// @formatter:on
private final byte[] fArray;
private final int foffset;
private final byte flength;
@ -2050,11 +2062,12 @@ public final class PrivateCellUtil {
}
private static class LastOnRowColByteBufferExtendedCell extends LastOnRowByteBufferExtendedCell {
private static final int FIXED_OVERHEAD =
LastOnRowByteBufferExtendedCell.FIXED_OVERHEAD
+ ClassSize.REFERENCE * 2 // fBuffer and qBuffer
+ Bytes.SIZEOF_INT * 3 // foffset, qoffset, qlength
+ Bytes.SIZEOF_BYTE; // flength
// @formatter:off
private static final int FIXED_OVERHEAD = LastOnRowByteBufferExtendedCell.FIXED_OVERHEAD
+ ClassSize.REFERENCE * 2 // fBuffer and qBuffer
+ Bytes.SIZEOF_INT * 3 // foffset, qoffset, qlength
+ Bytes.SIZEOF_BYTE; // flength
// @formatter:on
private final ByteBuffer fBuffer;
private final int foffset;
private final byte flength;
@ -2119,11 +2132,12 @@ public final class PrivateCellUtil {
}
private static class FirstOnRowDeleteFamilyCell extends EmptyCell {
private static final int FIXED_OVERHEAD =
ClassSize.OBJECT // object
// @formatter:off
private static final int FIXED_OVERHEAD = ClassSize.OBJECT // object
+ ClassSize.REFERENCE * 2 // fBuffer and qBuffer
+ Bytes.SIZEOF_INT * 3 // foffset, qoffset, qlength
+ Bytes.SIZEOF_BYTE; // flength
// @formatter:on
private final byte[] row;
private final byte[] fam;
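The PrivateCellUtil hunks above, and the AbstractMemStore hunk later in this commit, wrap manually laid-out size arithmetic in // @formatter:off and // @formatter:on markers. An Eclipse-style formatter, which is what spotless appears to be configured with here, honors those markers (assuming on/off tags are enabled in the formatter profile) and leaves the enclosed region alone, so the one-term-per-line layout keeps its explanatory trailing comments across future formatting runs. A hypothetical sketch of the pattern, with literal sizes standing in for the ClassSize constants:

public class FixedOverheadExample {
  // @formatter:off
  // One term per line so every component of the overhead keeps its comment;
  // the off/on markers stop the formatter from re-joining these lines.
  static final int FIXED_OVERHEAD = 16  // object header (assumed size)
      + 8                               // one reference (assumed size)
      + Integer.BYTES                   // row offset
      + Short.BYTES;                    // row length
  // @formatter:on
}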


@ -164,17 +164,22 @@ public class CoprocessorClassLoader extends ClassLoaderBase {
}
FileSystem fs = pathPattern.getFileSystem(conf);
Path pathPattern1 = fs.isDirectory(pathPattern) ?
new Path(pathPattern, "*.jar") : pathPattern; // append "*.jar" if a directory is specified
FileStatus[] fileStatuses = fs.globStatus(pathPattern1); // return all files that match the pattern
if (fileStatuses == null || fileStatuses.length == 0) { // if no one matches
// append "*.jar" if a directory is specified
Path pathPattern1 = fs.isDirectory(pathPattern) ? new Path(pathPattern, "*.jar") : pathPattern;
// return all files that match the pattern
FileStatus[] fileStatuses = fs.globStatus(pathPattern1);
if (fileStatuses == null || fileStatuses.length == 0) {
// if no one matches
throw new FileNotFoundException(pathPattern1.toString());
} else {
boolean validFileEncountered = false;
for (Path path : FileUtil.stat2Paths(fileStatuses)) { // for each file that match the pattern
if (fs.isFile(path)) { // only process files, skip for directories
File dst = new File(parentDirStr, "." + pathPrefix + "."
+ path.getName() + "." + EnvironmentEdgeManager.currentTime() + ".jar");
// for each file that match the pattern
for (Path path : FileUtil.stat2Paths(fileStatuses)) {
if (fs.isFile(path)) {
// only process files, skip for directories
File dst = new File(parentDirStr,
"." + pathPrefix + "." + path.getName() + "." + EnvironmentEdgeManager.currentTime()
+ ".jar");
fs.copyToLocalFile(path, new Path(dst.toString()));
dst.deleteOnExit();
@ -182,7 +187,8 @@ public class CoprocessorClassLoader extends ClassLoaderBase {
JarFile jarFile = new JarFile(dst.toString());
try {
Enumeration<JarEntry> entries = jarFile.entries(); // get entries inside a jar file
// get entries inside a jar file
Enumeration<JarEntry> entries = jarFile.entries();
while (entries.hasMoreElements()) {
JarEntry entry = entries.nextElement();
Matcher m = libJarPattern.matcher(entry.getName());
@ -200,11 +206,12 @@ public class CoprocessorClassLoader extends ClassLoaderBase {
} finally {
jarFile.close();
}
validFileEncountered = true; // Set to true when encountering a file
// Set to true when encountering a file
validFileEncountered = true;
}
}
if (validFileEncountered == false) { // all items returned by globStatus() are directories
if (validFileEncountered == false) {
// all items returned by globStatus() are directories
throw new FileNotFoundException("No file found matching " + pathPattern1.toString());
}
}


@ -303,8 +303,8 @@ public class DemoClient {
m = new Mutation();
m.column = ByteBuffer.wrap(bytes("entry:sqr"));
m.isDelete = true;
client.mutateRowTs(demoTable, ByteBuffer.wrap(row), mutations, 1,
dummyAttributes); // shouldn't override latest
// shouldn't override latest
client.mutateRowTs(demoTable, ByteBuffer.wrap(row), mutations, 1, dummyAttributes);
printRow(client.getRow(demoTable, ByteBuffer.wrap(row), dummyAttributes));
List<TCell> versions = client.getVer(demoTable, ByteBuffer.wrap(row),


@ -483,8 +483,8 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo
String HEDGED_READ_WINS_DESC =
"The number of times we started a hedged read and a hedged read won";
String HEDGED_READ_IN_CUR_THREAD = "hedgedReadOpsInCurThread";
String HEDGED_READ_IN_CUR_THREAD_DESC =
"The number of times we execute a hedged read in current thread as a fallback for task rejection";
String HEDGED_READ_IN_CUR_THREAD_DESC = "The number of times we execute a hedged read"
+ " in current thread as a fallback for task rejection";
String TOTAL_BYTES_READ = "totalBytesRead";
String TOTAL_BYTES_READ_DESC = "The total number of bytes read from HDFS";


@ -86,21 +86,20 @@ public class IntegrationTestIngestWithACL extends IntegrationTestIngest {
tmp.add(sb.toString());
return tmp.toArray(new String[tmp.size()]);
}
@Override
protected void addOptions() {
super.addOptions();
super.addOptWithArg(OPT_SUPERUSER,
"Super user name used to add the ACL permissions");
super.addOptWithArg(OPT_SUPERUSER, "Super user name used to add the ACL permissions");
super.addOptWithArg(OPT_USERS,
"List of users to be added with the ACLs. Should be comma seperated.");
super
.addOptWithArg(
OPT_AUTHN,
"The name of the properties file that contains kerberos key tab file and principal definitions. " +
"The principal key in the file should be of the form hbase.<username>.kerberos.principal." +
" The keytab key in the file should be of the form hbase.<username>.keytab.file. Example: " +
"hbase.user1.kerberos.principal=user1/fully.qualified.domain.name@YOUR-REALM.COM, " +
"hbase.user1.keytab.file=<filelocation>.");
super.addOptWithArg(OPT_AUTHN,
"The name of the properties file that contains"
+ " kerberos key tab file and principal definitions. The principal key in the file"
+ " should be of the form hbase.<username>.kerberos.principal. The keytab key in the"
+ " file should be of the form hbase.<username>.keytab.file. Example:"
+ " hbase.user1.kerberos.principal=user1/fully.qualified.domain.name@YOUR-REALM.COM,"
+ " hbase.user1.keytab.file=<filelocation>.");
}
@Override


@ -49,11 +49,12 @@ public class ServerAndDependenciesKillingMonkeyFactory extends MonkeyFactory {
loadProperties();
// Destructive actions to mess things around. Cannot run batch restart.
Action[] actions1 = new Action[]{
// @formatter:off
Action[] actions1 = new Action[] {
new RestartRandomRsExceptMetaAction(60000),
new RestartActiveMasterAction(5000),
new RollingBatchRestartRsAction(5000, 1.0f, 2,
true), // only allow 2 servers to be dead.
// only allow 2 servers to be dead.
new RollingBatchRestartRsAction(5000, 1.0f, 2, true),
new ForceBalancerAction(),
new RestartRandomDataNodeAction(60000),
new RestartRandomZKNodeAction(60000),
@ -61,6 +62,7 @@ public class ServerAndDependenciesKillingMonkeyFactory extends MonkeyFactory {
new RollingBatchSuspendResumeRsAction(rollingBatchSuspendRSSleepTime,
rollingBatchSuspendtRSRatio)
};
// @formatter:on
// Action to log more info for debugging
Action[] actions2 = new Action[]{


@ -47,16 +47,18 @@ public class ServerKillingMonkeyFactory extends MonkeyFactory {
loadProperties();
// Destructive actions to mess things around. Cannot run batch restart
// @formatter:off
Action[] actions1 = new Action[] {
new RestartRandomRsExceptMetaAction(60000),
new RestartActiveMasterAction(5000),
new RollingBatchRestartRsAction(5000, 1.0f, 2,
true), //only allow 2 servers to be dead
new RestartRandomRsExceptMetaAction(60000),
new RestartActiveMasterAction(5000),
// only allow 2 servers to be dead
new RollingBatchRestartRsAction(5000, 1.0f, 2, true),
new ForceBalancerAction(),
new GracefulRollingRestartRsAction(gracefulRollingRestartTSSLeepTime),
new RollingBatchSuspendResumeRsAction(rollingBatchSuspendRSSleepTime,
rollingBatchSuspendtRSRatio)
};
// @formatter:on
// Action to log more info for debugging
Action[] actions2 = new Action[] {


@ -42,7 +42,7 @@ import org.apache.hadoop.mapred.Reporter;
* A Base for {@link TableInputFormat}s. Receives a {@link Table}, a
* byte[] of input columns and optionally a {@link Filter}.
* Subclasses may use other TableRecordReader implementations.
*
* <p/>
* Subclasses MUST ensure initializeTable(Connection, TableName) is called for an instance to
* function properly. Each of the entry points to this class used by the MapReduce framework,
* {@link #getRecordReader(InputSplit, JobConf, Reporter)} and {@link #getSplits(JobConf, int)},
@ -98,8 +98,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
* Builds a TableRecordReader. If no TableRecordReader was provided, uses
* the default.
*
* @see org.apache.hadoop.mapred.InputFormat#getRecordReader(InputSplit,
* JobConf, Reporter)
* @see InputFormat#getRecordReader(InputSplit, JobConf, Reporter)
*/
public RecordReader<ImmutableBytesWritable, Result> getRecordReader(
InputSplit split, JobConf job, Reporter reporter)
@ -165,7 +164,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
/**
* Calculates the splits that will serve as input for the map tasks.
*
* <p/>
* Splits are created in number equal to the smallest between numSplits and
* the number of {@link org.apache.hadoop.hbase.regionserver.HRegion}s in the table.
* If the number of splits is smaller than the number of
@ -180,7 +179,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
*
* @return the input splits
*
* @see org.apache.hadoop.mapred.InputFormat#getSplits(org.apache.hadoop.mapred.JobConf, int)
* @see InputFormat#getSplits(org.apache.hadoop.mapred.JobConf, int)
*/
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
if (this.table == null) {
@ -282,7 +281,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
* will call {@link #initialize(JobConf)} as a convenient centralized location to handle
* retrieving the necessary configuration information and calling
* {@link #initializeTable(Connection, TableName)}.
*
* <p/>
* Subclasses should implement their initialize call such that it is safe to call multiple times.
* The current TableInputFormatBase implementation relies on a non-null table reference to decide
* if an initialize call is needed, but this behavior may change in the future. In particular,


@ -72,9 +72,7 @@ public abstract class MultiTableInputFormatBase extends
* @return The newly created record reader.
* @throws IOException When creating the reader fails.
* @throws InterruptedException when record reader initialization fails
* @see org.apache.hadoop.mapreduce.InputFormat#createRecordReader(
* org.apache.hadoop.mapreduce.InputSplit,
* org.apache.hadoop.mapreduce.TaskAttemptContext)
* @see InputFormat#createRecordReader(InputSplit, TaskAttemptContext)
*/
@Override
public RecordReader<ImmutableBytesWritable, Result> createRecordReader(
@ -152,7 +150,7 @@ public abstract class MultiTableInputFormatBase extends
* @param context The current job context.
* @return The list of input splits.
* @throws IOException When creating the list of splits fails.
* @see org.apache.hadoop.mapreduce.InputFormat#getSplits(org.apache.hadoop.mapreduce.JobContext)
* @see InputFormat#getSplits(org.apache.hadoop.mapreduce.JobContext)
*/
@Override
public List<InputSplit> getSplits(JobContext context) throws IOException {


@ -49,8 +49,7 @@ import java.util.Map;
* included in each snapshot/scan
* pair.
* {@link TableMapReduceUtil#initMultiTableSnapshotMapperJob
* (java.util.Map, Class, Class, Class, org.apache.hadoop.mapreduce.Job, boolean, org.apache
* .hadoop.fs.Path)}
* (Map, Class, Class, Class, org.apache.hadoop.mapreduce.Job, boolean, Path)}
* can be used to configure the job.
* <pre>{@code
* Job job = new Job(conf);


@ -78,8 +78,7 @@ public class MultiTableSnapshotInputFormatImpl {
/**
* Return the list of splits extracted from the scans/snapshots pushed to conf by
* {@link
* #setInput(org.apache.hadoop.conf.Configuration, java.util.Map, org.apache.hadoop.fs.Path)}
* {@link #setInput(Configuration, Map, Path)}
*
* @param conf Configuration to determine splits from
* @return Return the list of splits extracted from the scans/snapshots pushed to conf
@ -115,7 +114,7 @@ public class MultiTableSnapshotInputFormatImpl {
/**
* Retrieve the snapshot name -&gt; list&lt;scan&gt; mapping pushed to configuration by
* {@link #setSnapshotToScans(org.apache.hadoop.conf.Configuration, java.util.Map)}
* {@link #setSnapshotToScans(Configuration, Map)}
*
* @param conf Configuration to extract name -&gt; list&lt;scan&gt; mappings from.
* @return the snapshot name -&gt; list&lt;scan&gt; mapping pushed to configuration


@ -231,8 +231,9 @@ public class MultithreadedTableMapper<K2, V2> extends TableMapper<K2, V2> {
}
}
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION",
justification="Don't understand why FB is complaining about this one. We do throw exception")
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "REC_CATCH_EXCEPTION",
justification = "Don't understand why FB is complaining about this one."
+ " We do throw exception")
private class MapRunner implements Runnable {
private Mapper<ImmutableBytesWritable, Result, K2,V2> mapper;
private Context subcontext;


@ -65,10 +65,10 @@ import org.slf4j.LoggerFactory;
/**
* Scans a given table + CF for all mob reference cells to get the list of backing mob files.
* For each referenced file we attempt to verify that said file is on the FileSystem in a place
* that the MOB system will look when attempting to resolve the actual value.
*
* Scans a given table + CF for all mob reference cells to get the list of backing mob files. For
* each referenced file we attempt to verify that said file is on the FileSystem in a place that the
* MOB system will look when attempting to resolve the actual value.
* <p/>
* The job includes counters that can help provide a rough sketch of the mob data.
*
* <pre>
@ -94,31 +94,31 @@ import org.slf4j.LoggerFactory;
* Number of rows with total size in the 100,000s of bytes=6838
* Number of rows with total size in the 1,000,000s of bytes=3162
* </pre>
*
* * Map-Reduce Framework:Map input records - the number of rows with mob references
* * Map-Reduce Framework:Reduce output records - the number of unique hfiles referenced
* * MOB:NUM_CELLS - the total number of mob reference cells
* * PROBLEM:Affected rows - the number of rows that reference hfiles with an issue
* * PROBLEM:Problem MOB files - the number of unique hfiles that have an issue
* * CELLS PER ROW: - this counter group gives a histogram of the order of magnitude of the
* number of cells in a given row by grouping by the number of digits used in each count.
* This allows us to see more about the distribution of cells than what we can determine
* with just the cell count and the row count. In this particular example we can see that
* all of our rows have somewhere between 1 - 9 cells.
* * ROWS WITH PROBLEMS PER FILE: - this counter group gives a histogram of the order of
* magnitude of the number of rows in each of the hfiles with a problem. e.g. in the
* example there are 2 hfiles and they each have the same order of magnitude number of rows,
* specifically between 100 and 999.
* * SIZES OF CELLS: - this counter group gives a histogram of the order of magnitude of
* the size of mob values according to our reference cells. e.g. in the example above we
* have cell sizes that are all between 10,000 bytes and 9,999,999 bytes. From this
* histogram we can also see that _most_ cells are 100,000 - 999,000 bytes and the smaller
* and bigger ones are outliers making up less than 2% of mob cells.
* * SIZES OF ROWS: - this counter group gives a histogram of the order of magnitude of the
* size of mob values across each row according to our reference cells. In the example above
* we have rows that are are between 100,000 bytes and 9,999,999 bytes. We can also see that
* about 2/3rd of our rows are 100,000 - 999,999 bytes.
*
* <ol>
* <li>Map-Reduce Framework:Map input records - the number of rows with mob references</li>
* <li>Map-Reduce Framework:Reduce output records - the number of unique hfiles referenced</li>
* <li>MOB:NUM_CELLS - the total number of mob reference cells</li>
* <li>PROBLEM:Affected rows - the number of rows that reference hfiles with an issue</li>
* <li>PROBLEM:Problem MOB files - the number of unique hfiles that have an issue</li>
* <li>CELLS PER ROW: - this counter group gives a histogram of the order of magnitude of the number
* of cells in a given row by grouping by the number of digits used in each count. This allows us to
* see more about the distribution of cells than what we can determine with just the cell count and
* the row count. In this particular example we can see that all of our rows have somewhere between
* 1 - 9 cells.</li>
* <li>ROWS WITH PROBLEMS PER FILE: - this counter group gives a histogram of the order of magnitude
* of the number of rows in each of the hfiles with a problem. e.g. in the example there are 2
* hfiles and they each have the same order of magnitude number of rows, specifically between 100
* and 999.</li>
* <li>SIZES OF CELLS: - this counter group gives a histogram of the order of magnitude of the size
* of mob values according to our reference cells. e.g. in the example above we have cell sizes that
* are all between 10,000 bytes and 9,999,999 bytes. From this histogram we can also see that _most_
* cells are 100,000 - 999,000 bytes and the smaller and bigger ones are outliers making up less
* than 2% of mob cells.</li>
* <li>SIZES OF ROWS: - this counter group gives a histogram of the order of magnitude of the size
* of mob values across each row according to our reference cells. In the example above we have rows
* that are are between 100,000 bytes and 9,999,999 bytes. We can also see that about 2/3rd of our
* rows are 100,000 - 999,999 bytes.</li>
* </ol>
* Generates a report that gives one file status per line, with tabs dividing fields.
*
* <pre>
@ -133,32 +133,31 @@ import org.slf4j.LoggerFactory;
* </pre>
*
* Possible results are listed; the first three indicate things are working properly.
* * MOB DIR - the reference is in the normal MOB area for the given table and CF
* * HLINK TO ARCHIVE FOR SAME TABLE - the reference is present in the archive area for this
* table and CF
* * HLINK TO ARCHIVE FOR OTHER TABLE - the reference is present in a different table and CF,
* either in the MOB or archive areas (e.g. from a snapshot restore or clone)
* * ARCHIVE WITH HLINK BUT NOT FROM OUR TABLE - the reference is currently present in the archive
* area for this table and CF, but it is kept there because a _different_ table has a
* reference to it (e.g. from a snapshot clone). If these other tables are removed then
* the file will likely be deleted unless there is a snapshot also referencing it.
* * ARCHIVE BUT NO HLINKS - the reference is currently present in the archive for this table and
* CF, but there are no references present to prevent its removal. Unless it is newer than
* the general TTL (default 5 minutes) or referenced in a snapshot it will be subject to
* cleaning.
* * ARCHIVE BUT FAILURE WHILE CHECKING HLINKS - Check the job logs to see why things failed while
* looking for why this file is being kept around.
* * MISSING FILE - We couldn't find the reference on the FileSystem. Either there is dataloss due
* to a bug in the MOB storage system or the MOB storage is damaged but in an edge case that
* allows it to work for now. You can verify which by doing a raw reference scan to get the
* referenced hfile and check the underlying filesystem. See the ref guide section on mob
* for details.
* * HLINK BUT POINT TO MISSING FILE - There is a pointer in our mob area for this table and CF
* to a file elsewhere on the FileSystem, however the file it points to no longer exists.
* * MISSING FILE BUT FAILURE WHILE CHECKING HLINKS - We could not find the referenced file,
* however you should check the job logs to see why we couldn't check to see if there is a
* pointer to the referenced file in our archive or another table's archive or mob area.
*
* <ol>
* <li>MOB DIR - the reference is in the normal MOB area for the given table and CF</li>
* <li>HLINK TO ARCHIVE FOR SAME TABLE - the reference is present in the archive area for this table
* and CF</li>
* <li>HLINK TO ARCHIVE FOR OTHER TABLE - the reference is present in a different table and CF,
* either in the MOB or archive areas (e.g. from a snapshot restore or clone)</li>
* <li>ARCHIVE WITH HLINK BUT NOT FROM OUR TABLE - the reference is currently present in the archive
* area for this table and CF, but it is kept there because a _different_ table has a reference to
* it (e.g. from a snapshot clone). If these other tables are removed then the file will likely be
* deleted unless there is a snapshot also referencing it.</li>
* <li>ARCHIVE BUT NO HLINKS - the reference is currently present in the archive for this table and
* CF, but there are no references present to prevent its removal. Unless it is newer than the
* general TTL (default 5 minutes) or referenced in a snapshot it will be subject to cleaning.</li>
* <li>ARCHIVE BUT FAILURE WHILE CHECKING HLINKS - Check the job logs to see why things failed while
* looking for why this file is being kept around.</li>
* <li>MISSING FILE - We couldn't find the reference on the FileSystem. Either there is dataloss due
* to a bug in the MOB storage system or the MOB storage is damaged but in an edge case that allows
* it to work for now. You can verify which by doing a raw reference scan to get the referenced
* hfile and check the underlying filesystem. See the ref guide section on mob for details.</li>
* <li>HLINK BUT POINT TO MISSING FILE - There is a pointer in our mob area for this table and CF to
* a file elsewhere on the FileSystem, however the file it points to no longer exists.</li>
* <li>MISSING FILE BUT FAILURE WHILE CHECKING HLINKS - We could not find the referenced file,
* however you should check the job logs to see why we couldn't check to see if there is a pointer
* to the referenced file in our archive or another table's archive or mob area.</li>
* </ol>
*/
@InterfaceAudience.Private
public class MobRefReporter extends Configured implements Tool {


@ -298,15 +298,16 @@ public class TestImportExport {
IMPORT_TABLE, FQ_OUTPUT_DIR
};
assertTrue(runImport(args));
/* exportedTableIn94Format contains 5 rows
ROW COLUMN+CELL
r1 column=f1:c1, timestamp=1383766761171, value=val1
r2 column=f1:c1, timestamp=1383766771642, value=val2
r3 column=f1:c1, timestamp=1383766777615, value=val3
r4 column=f1:c1, timestamp=1383766785146, value=val4
r5 column=f1:c1, timestamp=1383766791506, value=val5
*/
assertEquals(5, UTIL.countRows(t));
// @formatter:off
// exportedTableIn94Format contains 5 rows
// ROW COLUMN+CELL
// r1 column=f1:c1, timestamp=1383766761171, value=val1
// r2 column=f1:c1, timestamp=1383766771642, value=val2
// r3 column=f1:c1, timestamp=1383766777615, value=val3
// r4 column=f1:c1, timestamp=1383766785146, value=val4
// r5 column=f1:c1, timestamp=1383766791506, value=val5
// @formatter:on
assertEquals(5, UTIL.countRows(t));
}
}
@ -331,12 +332,9 @@ public class TestImportExport {
p.addColumn(FAMILYA, QUAL, now + 3, QUAL);
p.addColumn(FAMILYA, QUAL, now + 4, QUAL);
t.put(p);
String[] args = new String[] {
"-D" + ExportUtils.EXPORT_BATCHING + "=" + EXPORT_BATCH_SIZE, // added scanner batching arg.
name.getMethodName(),
FQ_OUTPUT_DIR
};
// added scanner batching arg.
String[] args = new String[] { "-D" + ExportUtils.EXPORT_BATCHING + "=" + EXPORT_BATCH_SIZE,
name.getMethodName(), FQ_OUTPUT_DIR };
assertTrue(runExport(args));
FileSystem fs = FileSystem.get(UTIL.getConfiguration());


@ -48,8 +48,9 @@ public class TestRowModel extends TestModelBase<RowModel> {
public TestRowModel() throws Exception {
super(RowModel.class);
AS_XML =
"<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><Row key=\"dGVzdHJvdzE=\">" +
"<Cell column=\"dGVzdGNvbHVtbjE=\" timestamp=\"1245219839331\">dGVzdHZhbHVlMQ==</Cell></Row>";
"<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>" + "<Row key=\"dGVzdHJvdzE=\">"
+ "<Cell column=\"dGVzdGNvbHVtbjE=\" timestamp=\"1245219839331\">dGVzdHZhbHVlMQ==</Cell>"
+ "</Row>";
AS_JSON =
"{\"key\":\"dGVzdHJvdzE=\",\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjE=\"," +


@ -157,7 +157,6 @@ public class ForeignException extends IOException {
* @param bytes
* @return the ForeignExcpetion instance
* @throws InvalidProtocolBufferException if there was deserialization problem this is thrown.
* @throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException
*/
public static ForeignException deserialize(byte[] bytes)
throws IOException {


@ -534,13 +534,13 @@ public final class HFile {
/**
* @param fs filesystem
* @param path Path to file to read
* @param cacheConf This must not be null. @see
* {@link org.apache.hadoop.hbase.io.hfile.CacheConfig#CacheConfig(Configuration)}
* @param cacheConf This must not be null.
* @param primaryReplicaReader true if this is a reader for primary replica
* @param conf Configuration
* @return an active Reader instance
* @throws IOException Will throw a CorruptHFileException (DoNotRetryIOException subtype) if hfile
* is corrupt/invalid.
* @see CacheConfig#CacheConfig(Configuration)
*/
public static Reader createReader(FileSystem fs, Path path, CacheConfig cacheConf,
boolean primaryReplicaReader, Configuration conf) throws IOException {


@ -149,11 +149,10 @@ public class HMasterCommandLine extends ServerCommandLine {
if (shutDownCluster) {
return stopMaster();
}
System.err.println(
"To shutdown the master run " +
"hbase-daemon.sh stop master or send a kill signal to " +
"the HMaster pid, " +
"and to stop HBase Cluster run \"stop-hbase.sh\" or \"hbase master stop --shutDownCluster\"");
System.err.println("To shutdown the master run "
+ "hbase-daemon.sh stop master or send a kill signal to the HMaster pid, "
+ "and to stop HBase Cluster run \"stop-hbase.sh\" or \"hbase master "
+ "stop --shutDownCluster\"");
return 1;
} else if ("clear".equals(command)) {
return (ZNodeClearer.clear(getConf()) ? 0 : 1);


@ -293,9 +293,9 @@ public class MasterWalManager {
splitLog(serverNames, META_FILTER);
}
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="UL_UNRELEASED_LOCK", justification=
"We only release this lock when we set it. Updates to code that uses it should verify use " +
"of the guard boolean.")
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "UL_UNRELEASED_LOCK",
justification = "We only release this lock when we set it. Updates to code "
+ "that uses it should verify use of the guard boolean.")
List<Path> getLogDirs(final Set<ServerName> serverNames) throws IOException {
List<Path> logDirs = new ArrayList<>();
boolean needReleaseLock = false;


@ -16,6 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.hbase.master;
import static org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective.CHECK;
import static org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective.FORCE;
import static org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus.DELETED;
@ -155,10 +156,10 @@ public class SplitLogManager {
/**
* Get a list of paths that need to be split given a set of server-specific directories and
* optionally a filter.
*
* <p/>
* See {@link AbstractFSWALProvider#getServerNameFromWALDirectoryName} for more info on directory
* layout.
*
* <p/>
* Should be package-private, but is needed by
* {@link org.apache.hadoop.hbase.wal.WALSplitter#split(Path, Path, Path, FileSystem,
* Configuration, org.apache.hadoop.hbase.wal.WALFactory)} for tests.


@ -274,11 +274,11 @@ class SimpleRegionNormalizer implements RegionNormalizer, ConfigurationObserver
}
/**
* @param tableRegions regions of table to normalize
* Also make sure tableRegions contains regions of the same table
* @param tableRegions regions of table to normalize
* @param tableDescriptor the TableDescriptor
* @return average region size depending on
* @see org.apache.hadoop.hbase.client.TableDescriptor#getNormalizerTargetRegionCount()
* Also make sure tableRegions contains regions of the same table
* @see TableDescriptor#getNormalizerTargetRegionCount()
*/
private double getAverageRegionSizeMb(final List<RegionInfo> tableRegions,
final TableDescriptor tableDescriptor) {


@ -64,8 +64,9 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CloneSnapshotState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
@InterfaceAudience.Private
@ -310,11 +311,10 @@ public class CloneSnapshotProcedure
throws IOException {
super.serializeStateData(serializer);
MasterProcedureProtos.CloneSnapshotStateData.Builder cloneSnapshotMsg =
MasterProcedureProtos.CloneSnapshotStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
.setSnapshot(this.snapshot)
.setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor));
CloneSnapshotStateData.Builder cloneSnapshotMsg = CloneSnapshotStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
.setSnapshot(this.snapshot)
.setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor));
cloneSnapshotMsg.setRestoreAcl(restoreAcl);
if (newRegions != null) {
@ -328,11 +328,11 @@ public class CloneSnapshotProcedure
while (it.hasNext()) {
final Map.Entry<String, Pair<String, String>> entry = it.next();
MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder parentToChildrenPair =
MasterProcedureProtos.RestoreParentToChildRegionsPair.newBuilder()
.setParentRegionName(entry.getKey())
.setChild1RegionName(entry.getValue().getFirst())
.setChild2RegionName(entry.getValue().getSecond());
RestoreParentToChildRegionsPair.Builder parentToChildrenPair =
RestoreParentToChildRegionsPair.newBuilder()
.setParentRegionName(entry.getKey())
.setChild1RegionName(entry.getValue().getFirst())
.setChild2RegionName(entry.getValue().getSecond());
cloneSnapshotMsg.addParentToChildRegionsPairList(parentToChildrenPair);
}
}
@ -347,8 +347,7 @@ public class CloneSnapshotProcedure
throws IOException {
super.deserializeStateData(serializer);
MasterProcedureProtos.CloneSnapshotStateData cloneSnapshotMsg =
serializer.deserialize(MasterProcedureProtos.CloneSnapshotStateData.class);
CloneSnapshotStateData cloneSnapshotMsg = serializer.deserialize(CloneSnapshotStateData.class);
setUser(MasterProcedureUtil.toUserInfo(cloneSnapshotMsg.getUserInfo()));
snapshot = cloneSnapshotMsg.getSnapshot();
tableDescriptor = ProtobufUtil.toTableDescriptor(cloneSnapshotMsg.getTableSchema());
@ -365,8 +364,8 @@ public class CloneSnapshotProcedure
}
if (cloneSnapshotMsg.getParentToChildRegionsPairListCount() > 0) {
parentsToChildrenPairMap = new HashMap<>();
for (MasterProcedureProtos.RestoreParentToChildRegionsPair parentToChildrenPair:
cloneSnapshotMsg.getParentToChildRegionsPairListList()) {
for (RestoreParentToChildRegionsPair parentToChildrenPair : cloneSnapshotMsg
.getParentToChildRegionsPairListList()) {
parentsToChildrenPairMap.put(
parentToChildrenPair.getParentRegionName(),
new Pair<>(


@ -58,8 +58,9 @@ import org.slf4j.LoggerFactory;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RestoreSnapshotState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
@InterfaceAudience.Private
@ -239,11 +240,10 @@ public class RestoreSnapshotProcedure
throws IOException {
super.serializeStateData(serializer);
MasterProcedureProtos.RestoreSnapshotStateData.Builder restoreSnapshotMsg =
MasterProcedureProtos.RestoreSnapshotStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
.setSnapshot(this.snapshot)
.setModifiedTableSchema(ProtobufUtil.toTableSchema(modifiedTableDescriptor));
RestoreSnapshotStateData.Builder restoreSnapshotMsg = RestoreSnapshotStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
.setSnapshot(this.snapshot)
.setModifiedTableSchema(ProtobufUtil.toTableSchema(modifiedTableDescriptor));
if (regionsToRestore != null) {
for (RegionInfo hri: regionsToRestore) {
@ -266,11 +266,11 @@ public class RestoreSnapshotProcedure
while (it.hasNext()) {
final Map.Entry<String, Pair<String, String>> entry = it.next();
MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder parentToChildrenPair =
MasterProcedureProtos.RestoreParentToChildRegionsPair.newBuilder()
.setParentRegionName(entry.getKey())
.setChild1RegionName(entry.getValue().getFirst())
.setChild2RegionName(entry.getValue().getSecond());
RestoreParentToChildRegionsPair.Builder parentToChildrenPair =
RestoreParentToChildRegionsPair.newBuilder()
.setParentRegionName(entry.getKey())
.setChild1RegionName(entry.getValue().getFirst())
.setChild2RegionName(entry.getValue().getSecond());
restoreSnapshotMsg.addParentToChildRegionsPairList (parentToChildrenPair);
}
}
@ -283,8 +283,8 @@ public class RestoreSnapshotProcedure
throws IOException {
super.deserializeStateData(serializer);
MasterProcedureProtos.RestoreSnapshotStateData restoreSnapshotMsg =
serializer.deserialize(MasterProcedureProtos.RestoreSnapshotStateData.class);
RestoreSnapshotStateData restoreSnapshotMsg =
serializer.deserialize(RestoreSnapshotStateData.class);
setUser(MasterProcedureUtil.toUserInfo(restoreSnapshotMsg.getUserInfo()));
snapshot = restoreSnapshotMsg.getSnapshot();
modifiedTableDescriptor =
@ -315,8 +315,8 @@ public class RestoreSnapshotProcedure
}
}
if (restoreSnapshotMsg.getParentToChildRegionsPairListCount() > 0) {
for (MasterProcedureProtos.RestoreParentToChildRegionsPair parentToChildrenPair:
restoreSnapshotMsg.getParentToChildRegionsPairListList()) {
for (RestoreParentToChildRegionsPair parentToChildrenPair : restoreSnapshotMsg
.getParentToChildRegionsPairListList()) {
parentsToChildrenPairMap.put(
parentToChildrenPair.getParentRegionName(),
new Pair<>(


@ -79,8 +79,8 @@ public class BalancerRejectionQueueService implements NamedQueueService {
return;
}
if (!(namedQueuePayload instanceof BalancerRejectionDetails)) {
LOG.warn(
"BalancerRejectionQueueService: NamedQueuePayload is not of type BalancerRejectionDetails.");
LOG.warn("BalancerRejectionQueueService: NamedQueuePayload is not of type"
+ " BalancerRejectionDetails.");
return;
}
BalancerRejectionDetails balancerRejectionDetails = (BalancerRejectionDetails) namedQueuePayload;


@ -34,9 +34,10 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
justification="FindBugs seems confused; says bypassGlobals, namepaceLimiters, and " +
"tableLimiters are mostly synchronized...but to me it looks like they are totally synchronized")
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "IS2_INCONSISTENT_SYNC",
justification = "FindBugs seems confused; says bypassGlobals, namepaceLimiters, and "
+ "tableLimiters are mostly synchronized..."
+ "but to me it looks like they are totally synchronized")
public class UserQuotaState extends QuotaState {
private Map<String, QuotaLimiter> namespaceLimiters = null;
private Map<TableName, QuotaLimiter> tableLimiters = null;


@ -55,9 +55,11 @@ public abstract class AbstractMemStore implements MemStore {
protected RegionServicesForStores regionServices;
// @formatter:off
public final static long FIXED_OVERHEAD = (long) ClassSize.OBJECT
+ (5 * ClassSize.REFERENCE)
+ (2 * Bytes.SIZEOF_LONG); // snapshotId, timeOfOldestEdit
+ (5 * ClassSize.REFERENCE)
+ (2 * Bytes.SIZEOF_LONG); // snapshotId, timeOfOldestEdit
// @formatter:on
public final static long DEEP_OVERHEAD = FIXED_OVERHEAD;


@ -2088,8 +2088,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
throw new ServiceException(ie);
}
// We are assigning meta, wait a little for regionserver to finish initialization.
int timeout = regionServer.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
HConstants.DEFAULT_HBASE_RPC_TIMEOUT) >> 2; // Quarter of RPC timeout
// Default to quarter of RPC timeout
int timeout = regionServer.getConfiguration()
.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT) >> 2;
long endTime = EnvironmentEdgeManager.currentTime() + timeout;
synchronized (regionServer.online) {
try {


@ -200,10 +200,10 @@ public class ReplicationSource implements ReplicationSourceInterface {
this.waitOnEndpointSeconds =
this.conf.getInt(WAIT_ON_ENDPOINT_SECONDS, DEFAULT_WAIT_ON_ENDPOINT_SECONDS);
decorateConf();
this.sleepForRetries =
this.conf.getLong("replication.source.sleepforretries", 1000); // 1 second
this.maxRetriesMultiplier =
this.conf.getInt("replication.source.maxretriesmultiplier", 300); // 5 minutes @ 1 sec per
// 1 second
this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000);
// 5 minutes @ 1 sec per
this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300);
this.queueSizePerGroup = this.conf.getInt("hbase.regionserver.maxlogs", 32);
this.logQueue = new ReplicationSourceLogQueue(conf, metrics, this);
this.queueStorage = queueStorage;


@ -81,14 +81,15 @@ public class ReplicationSourceShipper extends Thread {
this.walGroupId = walGroupId;
this.logQueue = logQueue;
this.source = source;
this.sleepForRetries =
this.conf.getLong("replication.source.sleepforretries", 1000); // 1 second
this.maxRetriesMultiplier =
this.conf.getInt("replication.source.maxretriesmultiplier", 300); // 5 minutes @ 1 sec per
// 1 second
this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000);
// 5 minutes @ 1 sec per
this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300);
// 20 seconds
this.getEntriesTimeout =
this.conf.getInt("replication.source.getEntries.timeout", DEFAULT_TIMEOUT); // 20 seconds
this.conf.getInt("replication.source.getEntries.timeout", DEFAULT_TIMEOUT);
this.shipEditsTimeout = this.conf.getInt(HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT,
HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT_DFAULT);
HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT_DFAULT);
}
@Override


@ -106,10 +106,10 @@ class ReplicationSourceWALReader extends Thread {
int batchCount = conf.getInt("replication.source.nb.batches", 1);
this.totalBufferUsed = source.getSourceManager().getTotalBufferUsed();
this.totalBufferQuota = source.getSourceManager().getTotalBufferLimit();
this.sleepForRetries =
this.conf.getLong("replication.source.sleepforretries", 1000); // 1 second
this.maxRetriesMultiplier =
this.conf.getInt("replication.source.maxretriesmultiplier", 300); // 5 minutes @ 1 sec per
// 1 second
this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000);
// 5 minutes @ 1 sec per
this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300);
this.eofAutoRecovery = conf.getBoolean("replication.source.eof.autorecovery", false);
this.entryBatchQueue = new LinkedBlockingQueue<>(batchCount);
this.walGroupId = walGroupId;


@ -72,7 +72,7 @@ public class TokenUtil {
/**
* See {@link ClientTokenUtil#toToken(org.apache.hadoop.security.token.Token)}.
* See {@link ClientTokenUtil#toToken(Token)}.
* @deprecated External users should not use this method. Please post on
* the HBase dev mailing list if you need this method. Internal
* HBase code should use {@link ClientTokenUtil} instead.
@ -83,8 +83,7 @@ public class TokenUtil {
}
/**
* See {@link ClientTokenUtil#obtainToken(org.apache.hadoop.hbase.client.Connection,
* org.apache.hadoop.hbase.security.User)}.
* See {@link ClientTokenUtil#obtainToken(Connection, User)}.
* @deprecated External users should not use this method. Please post on
* the HBase dev mailing list if you need this method. Internal
* HBase code should use {@link ClientTokenUtil} instead.
@ -96,8 +95,7 @@ public class TokenUtil {
}
/**
* See {@link ClientTokenUtil#obtainAndCacheToken(org.apache.hadoop.hbase.client.Connection,
* org.apache.hadoop.hbase.security.User)}.
* See {@link ClientTokenUtil#obtainAndCacheToken(Connection, User)}.
*/
public static void obtainAndCacheToken(final Connection conn,
User user)
@ -106,7 +104,7 @@ public class TokenUtil {
}
/**
* See {@link ClientTokenUtil#toToken(org.apache.hadoop.security.token.Token)}.
* See {@link ClientTokenUtil#toToken(org.apache.hadoop.hbase.shaded.protobuf.generated.AuthenticationProtos.Token)}.
* @deprecated External users should not use this method. Please post on
* the HBase dev mailing list if you need this method. Internal
* HBase code should use {@link ClientTokenUtil} instead.


@ -167,8 +167,8 @@ public class EntryBuffers {
internify(entry);
entries.add(entry);
// TODO linkedlist entry
long incrHeap = entry.getEdit().heapSize() +
ClassSize.align(2 * ClassSize.REFERENCE); // WALKey pointers
// entry size plus WALKey pointers
long incrHeap = entry.getEdit().heapSize() + ClassSize.align(2 * ClassSize.REFERENCE);
heapInBuffer += incrHeap;
return incrHeap;
}


@ -2924,9 +2924,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
if (jobConf == null) {
jobConf = mrCluster.createJobConf();
}
jobConf.set("mapreduce.cluster.local.dir",
conf.get("mapreduce.cluster.local.dir")); //Hadoop MiniMR overwrites this while it should not
// Hadoop MiniMR overwrites this while it should not
jobConf.set("mapreduce.cluster.local.dir", conf.get("mapreduce.cluster.local.dir"));
LOG.info("Mini mapreduce cluster started");
// In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance and updates settings.

View File

@@ -223,8 +223,9 @@ public class TestRegionObserverScannerOpenHook {
Get get = new Get(ROW);
Result r = region.get(get);
assertNull(
"Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
+ r, r.listCells());
"Got an unexpected number of rows - "
+ "no data should be returned with the NoDataFromScan coprocessor. Found: " + r,
r.listCells());
HBaseTestingUtility.closeRegionAndWAL(region);
}
@@ -250,8 +251,9 @@ public class TestRegionObserverScannerOpenHook {
Get get = new Get(ROW);
Result r = region.get(get);
assertNull(
"Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
+ r, r.listCells());
"Got an unexpected number of rows - "
+ "no data should be returned with the NoDataFromScan coprocessor. Found: " + r,
r.listCells());
HBaseTestingUtility.closeRegionAndWAL(region);
}
@@ -269,15 +271,19 @@ public class TestRegionObserverScannerOpenHook {
}
public CountDownLatch getCompactionStateChangeLatch() {
if (compactionStateChangeLatch == null) compactionStateChangeLatch = new CountDownLatch(1);
if (compactionStateChangeLatch == null) {
compactionStateChangeLatch = new CountDownLatch(1);
}
return compactionStateChangeLatch;
}
@Override
public boolean compact(CompactionContext compaction, HStore store,
ThroughputController throughputController) throws IOException {
ThroughputController throughputController) throws IOException {
boolean ret = super.compact(compaction, store, throughputController);
if (ret) compactionStateChangeLatch.countDown();
if (ret) {
compactionStateChangeLatch.countDown();
}
return ret;
}
@@ -341,14 +347,16 @@ public class TestRegionObserverScannerOpenHook {
Get get = new Get(ROW);
Result r = table.get(get);
assertNull(
"Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
+ r, r.listCells());
"Got an unexpected number of rows - "
+ "no data should be returned with the NoDataFromScan coprocessor. Found: " + r,
r.listCells());
get = new Get(Bytes.toBytes("anotherrow"));
r = table.get(get);
assertNull(
"Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor Found: "
+ r, r.listCells());
"Got an unexpected number of rows - "
+ "no data should be returned with the NoDataFromScan coprocessor Found: " + r,
r.listCells());
table.close();
UTIL.shutdownMiniCluster();

View File

@@ -193,8 +193,9 @@ public class TestFavoredNodeAssignmentHelper {
// the primary can be assigned but the secondary/tertiary would be null
Map<String,Integer> rackToServerCount = new HashMap<>();
rackToServerCount.put("rack1", 1);
Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount);
Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper,
List<RegionInfo>> primaryRSMapAndHelper =
secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount);
FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
List<RegionInfo> regions = primaryRSMapAndHelper.getThird();

View File

@@ -212,9 +212,10 @@ public class TestFixedFileTrailer {
String msg = ex.getMessage();
String cleanMsg = msg.replaceAll(
"^(java(\\.[a-zA-Z]+)+:\\s+)?|\\s+\\(.*\\)\\s*$", "");
assertEquals("Actual exception message is \"" + msg + "\".\n" +
"Cleaned-up message", // will be followed by " expected: ..."
"Invalid HFile version: " + invalidVersion, cleanMsg);
// will be followed by " expected: ..."
assertEquals("Actual exception message is \"" + msg + "\".\nCleaned-up message",
"Invalid HFile version: " + invalidVersion,
cleanMsg);
LOG.info("Got an expected exception: " + msg);
}
}

View File

@@ -309,28 +309,29 @@ public class TestHFileBlock {
@Test
public void testGzipCompression() throws IOException {
final String correctTestBlockStr =
"DATABLK*\\x00\\x00\\x00>\\x00\\x00\\x0F\\xA0\\xFF\\xFF\\xFF\\xFF"
+ "\\xFF\\xFF\\xFF\\xFF"
+ "\\x0" + ChecksumType.getDefaultChecksumType().getCode()
+ "\\x00\\x00@\\x00\\x00\\x00\\x00["
// gzip-compressed block: http://www.gzip.org/zlib/rfc-gzip.html
+ "\\x1F\\x8B" // gzip magic signature
+ "\\x08" // Compression method: 8 = "deflate"
+ "\\x00" // Flags
+ "\\x00\\x00\\x00\\x00" // mtime
+ "\\x00" // XFL (extra flags)
// OS (0 = FAT filesystems, 3 = Unix). However, this field
// sometimes gets set to 0 on Linux and Mac, so we reset it to 3.
// This appears to be a difference caused by the availability
// (and use) of the native GZ codec.
+ "\\x03"
+ "\\xED\\xC3\\xC1\\x11\\x00 \\x08\\xC00DD\\xDD\\x7Fa"
+ "\\xD6\\xE8\\xA3\\xB9K\\x84`\\x96Q\\xD3\\xA8\\xDB\\xA8e\\xD4c"
+ "\\xD46\\xEA5\\xEA3\\xEA7\\xE7\\x00LI\\x5Cs\\xA0\\x0F\\x00\\x00"
+ "\\x00\\x00\\x00\\x00"; // 4 byte checksum (ignored)
final int correctGzipBlockLength = 95;
final String testBlockStr = createTestBlockStr(GZ, correctGzipBlockLength, false);
// @formatter:off
String correctTestBlockStr = "DATABLK*\\x00\\x00\\x00>\\x00\\x00\\x0F\\xA0\\xFF\\xFF\\xFF\\xFF"
+ "\\xFF\\xFF\\xFF\\xFF"
+ "\\x0" + ChecksumType.getDefaultChecksumType().getCode()
+ "\\x00\\x00@\\x00\\x00\\x00\\x00["
// gzip-compressed block: http://www.gzip.org/zlib/rfc-gzip.html
+ "\\x1F\\x8B" // gzip magic signature
+ "\\x08" // Compression method: 8 = "deflate"
+ "\\x00" // Flags
+ "\\x00\\x00\\x00\\x00" // mtime
+ "\\x00" // XFL (extra flags)
// OS (0 = FAT filesystems, 3 = Unix). However, this field
// sometimes gets set to 0 on Linux and Mac, so we reset it to 3.
// This appears to be a difference caused by the availability
// (and use) of the native GZ codec.
+ "\\x03"
+ "\\xED\\xC3\\xC1\\x11\\x00 \\x08\\xC00DD\\xDD\\x7Fa"
+ "\\xD6\\xE8\\xA3\\xB9K\\x84`\\x96Q\\xD3\\xA8\\xDB\\xA8e\\xD4c"
+ "\\xD46\\xEA5\\xEA3\\xEA7\\xE7\\x00LI\\x5Cs\\xA0\\x0F\\x00\\x00"
+ "\\x00\\x00\\x00\\x00"; // 4 byte checksum (ignored)
// @formatter:on
int correctGzipBlockLength = 95;
String testBlockStr = createTestBlockStr(GZ, correctGzipBlockLength, false);
// We ignore the block checksum because createTestBlockStr can change the
// gzip header after the block is produced
assertEquals(correctTestBlockStr.substring(0, correctGzipBlockLength - 4),

View File

@@ -75,34 +75,24 @@ public class TestMetricsRegion {
// test region with replica id > 0
mr = new MetricsRegion(new MetricsRegionWrapperStub(1), new Configuration());
agg = mr.getSource().getAggregateSource();
HELPER.assertGauge(
"namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_storeCount",
101, agg);
HELPER.assertGauge(
"namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_storeFileCount",
102, agg);
HELPER.assertGauge(
"namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_memstoreSize",
103, agg);
HELPER.assertCounter(
"namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_" +
"filteredReadRequestCount",
107, agg);
HELPER.assertCounter(
"namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_replicaid",
1, agg);
HELPER.assertCounter(
"namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_compactionsQueuedCount",
4, agg);
HELPER.assertCounter(
"namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_flushesQueuedCount",
6, agg);
HELPER.assertCounter(
"namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_maxCompactionQueueSize",
4, agg);
HELPER.assertCounter(
"namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_maxFlushQueueSize",
6, agg);
HELPER.assertGauge("namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001"
+ "_metric_storeCount", 101, agg);
HELPER.assertGauge("namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001"
+ "_metric_storeFileCount", 102, agg);
HELPER.assertGauge("namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001"
+ "_metric_memstoreSize", 103, agg);
HELPER.assertCounter("namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001"
+ "_metric_filteredReadRequestCount", 107, agg);
HELPER.assertCounter("namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001"
+ "_metric_replicaid", 1, agg);
HELPER.assertCounter("namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001"
+ "_metric_compactionsQueuedCount", 4, agg);
HELPER.assertCounter("namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001"
+ "_metric_flushesQueuedCount", 6, agg);
HELPER.assertCounter("namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001"
+ "_metric_maxCompactionQueueSize", 4, agg);
HELPER.assertCounter("namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001"
+ "_metric_maxFlushQueueSize", 6, agg);
mr.close();
}
}

View File

@@ -393,17 +393,30 @@ public class TestRegionMergeTransactionOnCluster {
for (Pair<RegionInfo, ServerName> p : currentRegionToServers) {
currentRegions.add(p.getFirst());
}
assertTrue(initialRegions.contains(mergedRegions.getFirst())); //this is the first region
assertTrue(initialRegions.contains(RegionReplicaUtil
.getRegionInfoForReplica(mergedRegions.getFirst(), 1))); //this is the replica of the first region
assertTrue(initialRegions.contains(mergedRegions.getSecond())); //this is the second region
assertTrue(initialRegions.contains(RegionReplicaUtil
.getRegionInfoForReplica(mergedRegions.getSecond(), 1))); //this is the replica of the second region
assertTrue(!initialRegions.contains(currentRegions.get(0))); //this is the new region
assertTrue(!initialRegions.contains(RegionReplicaUtil.getRegionInfoForReplica(currentRegions.get(0), 1))); //replica of the new region
assertTrue(currentRegions.contains(RegionReplicaUtil.getRegionInfoForReplica(currentRegions.get(0), 1))); //replica of the new region
assertTrue(!currentRegions.contains(RegionReplicaUtil.getRegionInfoForReplica(mergedRegions.getFirst(), 1))); //replica of the merged region
assertTrue(!currentRegions.contains(RegionReplicaUtil.getRegionInfoForReplica(mergedRegions.getSecond(), 1))); //replica of the merged region
// this is the first region
assertTrue(initialRegions.contains(mergedRegions.getFirst()));
// this is the replica of the first region
assertTrue(initialRegions
.contains(RegionReplicaUtil.getRegionInfoForReplica(mergedRegions.getFirst(), 1)));
// this is the second region
assertTrue(initialRegions.contains(mergedRegions.getSecond()));
// this is the replica of the second region
assertTrue(initialRegions
.contains(RegionReplicaUtil.getRegionInfoForReplica(mergedRegions.getSecond(), 1)));
// this is the new region
assertTrue(!initialRegions.contains(currentRegions.get(0)));
// replica of the new region
assertTrue(!initialRegions
.contains(RegionReplicaUtil.getRegionInfoForReplica(currentRegions.get(0), 1)));
// replica of the new region
assertTrue(currentRegions
.contains(RegionReplicaUtil.getRegionInfoForReplica(currentRegions.get(0), 1)));
// replica of the merged region
assertTrue(!currentRegions
.contains(RegionReplicaUtil.getRegionInfoForReplica(mergedRegions.getFirst(), 1)));
// replica of the merged region
assertTrue(!currentRegions
.contains(RegionReplicaUtil.getRegionInfoForReplica(mergedRegions.getSecond(), 1)));
table.close();
} finally {
TEST_UTIL.deleteTable(tableName);

View File

@@ -708,22 +708,30 @@ public class TestStoreScanner {
@Test
public void testWildCardScannerUnderDeletes() throws IOException {
KeyValue [] kvs = new KeyValue [] {
create("R1", "cf", "a", 2, KeyValue.Type.Put, "dont-care"), // inc
// orphaned delete column.
create("R1", "cf", "a", 1, KeyValue.Type.DeleteColumn, "dont-care"),
// column b
create("R1", "cf", "b", 2, KeyValue.Type.Put, "dont-care"), // inc
create("R1", "cf", "b", 1, KeyValue.Type.Put, "dont-care"), // inc
// column c
create("R1", "cf", "c", 10, KeyValue.Type.Delete, "dont-care"),
create("R1", "cf", "c", 10, KeyValue.Type.Put, "dont-care"), // no
create("R1", "cf", "c", 9, KeyValue.Type.Put, "dont-care"), // inc
// column d
create("R1", "cf", "d", 11, KeyValue.Type.Put, "dont-care"), // inc
create("R1", "cf", "d", 10, KeyValue.Type.DeleteColumn, "dont-care"),
create("R1", "cf", "d", 9, KeyValue.Type.Put, "dont-care"), // no
create("R1", "cf", "d", 8, KeyValue.Type.Put, "dont-care"), // no
KeyValue[] kvs = new KeyValue[] {
// inc
create("R1", "cf", "a", 2, KeyValue.Type.Put, "dont-care"),
// orphaned delete column.
create("R1", "cf", "a", 1, KeyValue.Type.DeleteColumn, "dont-care"),
// column b
// inc
create("R1", "cf", "b", 2, KeyValue.Type.Put, "dont-care"),
// inc
create("R1", "cf", "b", 1, KeyValue.Type.Put, "dont-care"),
// column c
create("R1", "cf", "c", 10, KeyValue.Type.Delete, "dont-care"),
// no
create("R1", "cf", "c", 10, KeyValue.Type.Put, "dont-care"),
// inc
create("R1", "cf", "c", 9, KeyValue.Type.Put, "dont-care"),
// column d
// inc
create("R1", "cf", "d", 11, KeyValue.Type.Put, "dont-care"),
create("R1", "cf", "d", 10, KeyValue.Type.DeleteColumn, "dont-care"),
// no
create("R1", "cf", "d", 9, KeyValue.Type.Put, "dont-care"),
// no
create("R1", "cf", "d", 8, KeyValue.Type.Put, "dont-care"),
};
List<KeyValueScanner> scanners = scanFixture(kvs);
@@ -926,6 +934,7 @@ public class TestStoreScanner {
return now;
}
});
// @formatter:off
KeyValue[] kvs = new KeyValue[]{
/*0*/ new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null,
now - 100, KeyValue.Type.DeleteFamily), // live
@@ -944,22 +953,23 @@ public class TestStoreScanner {
/*7*/ create("R1", "cf", "a",
now - 100, KeyValue.Type.DeleteColumn, "dont-care"), // max-version
/*8*/ create("R1", "cf", "b", now - 600,
KeyValue.Type.DeleteColumn, "dont-care"), //expired
KeyValue.Type.DeleteColumn, "dont-care"), // expired
/*9*/ create("R1", "cf", "b", now - 70,
KeyValue.Type.Put, "v2"), //live
KeyValue.Type.Put, "v2"), // live
/*10*/ create("R1", "cf", "b", now - 750,
KeyValue.Type.Put, "v1"), //expired
KeyValue.Type.Put, "v1"), // expired
/*11*/ create("R1", "cf", "c", now - 500,
KeyValue.Type.Delete, "dontcare"), //expired
KeyValue.Type.Delete, "dontcare"), // expired
/*12*/ create("R1", "cf", "c", now - 600,
KeyValue.Type.Put, "v1"), //expired
KeyValue.Type.Put, "v1"), // expired
/*13*/ create("R1", "cf", "c", now - 1000,
KeyValue.Type.Delete, "dontcare"), //expired
KeyValue.Type.Delete, "dontcare"), // expired
/*14*/ create("R1", "cf", "d", now - 60,
KeyValue.Type.Put, "expired put"), //live
KeyValue.Type.Put, "expired put"), // live
/*15*/ create("R1", "cf", "d", now - 100,
KeyValue.Type.Delete, "not-expired delete"), //live
KeyValue.Type.Delete, "not-expired delete"), // live
};
// @formatter:on
List<KeyValueScanner> scanners = scanFixture(kvs);
ScanInfo scanInfo = new ScanInfo(CONF, Bytes.toBytes("cf"),
0 /* minVersions */,

View File

@@ -17,6 +17,9 @@
*/
package org.apache.hadoop.hbase.regionserver.throttle;
import static org.apache.hadoop.hbase.regionserver.throttle.PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND;
import static org.apache.hadoop.hbase.regionserver.throttle.PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND;
import static org.apache.hadoop.hbase.regionserver.throttle.PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@@ -111,14 +114,8 @@ public class TestCompactionWithThroughputController {
conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 200);
conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
conf.setLong(
PressureAwareCompactionThroughputController
.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND,
throughputLimit);
conf.setLong(
PressureAwareCompactionThroughputController
.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND,
throughputLimit);
conf.setLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND, throughputLimit);
conf.setLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND, throughputLimit);
conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
PressureAwareCompactionThroughputController.class.getName());
TEST_UTIL.startMiniCluster(1);
@@ -183,21 +180,13 @@ public class TestCompactionWithThroughputController {
public void testThroughputTuning() throws Exception {
Configuration conf = TEST_UTIL.getConfiguration();
conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
conf.setLong(
PressureAwareCompactionThroughputController
.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND,
20L * 1024 * 1024);
conf.setLong(
PressureAwareCompactionThroughputController
.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND,
10L * 1024 * 1024);
conf.setLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND, 20L * 1024 * 1024);
conf.setLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND, 10L * 1024 * 1024);
conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 4);
conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 6);
conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
PressureAwareCompactionThroughputController.class.getName());
conf.setInt(
PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD,
1000);
conf.setInt(HBASE_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD, 1000);
TEST_UTIL.startMiniCluster(1);
Connection conn = ConnectionFactory.createConnection(conf);
try {

View File

@@ -38,7 +38,7 @@ import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationState;
/**
* This UT is used to make sure that we will not accidentally change the way to generate online
@@ -95,8 +95,8 @@ public class TestRefreshPeerWhileRegionServerRestarts extends TestReplicationBas
UTIL1.waitFor(30000, () -> {
for (Procedure<?> proc : UTIL1.getMiniHBaseCluster().getMaster().getProcedures()) {
if (proc instanceof DisablePeerProcedure) {
return ((DisablePeerProcedure) proc).getCurrentStateId() ==
MasterProcedureProtos.PeerModificationState.POST_PEER_MODIFICATION_VALUE;
return ((DisablePeerProcedure) proc)
.getCurrentStateId() == PeerModificationState.POST_PEER_MODIFICATION_VALUE;
}
}
return false;

View File

@@ -288,11 +288,15 @@ public class TestZKMulti {
// test that, even with operations that fail, the ones that would pass will pass
// with runSequentialOnMultiFailure
ops = new LinkedList<>();
ops.add(ZKUtilOp.setData(path1, Bytes.add(Bytes.toBytes(path1), Bytes.toBytes(path1)))); // pass
ops.add(ZKUtilOp.deleteNodeFailSilent(path2)); // pass
ops.add(ZKUtilOp.deleteNodeFailSilent(path3)); // fail -- node doesn't exist
ops.add(ZKUtilOp.createAndFailSilent(path4,
Bytes.add(Bytes.toBytes(path4), Bytes.toBytes(path4)))); // pass
// pass
ops.add(ZKUtilOp.setData(path1, Bytes.add(Bytes.toBytes(path1), Bytes.toBytes(path1))));
// pass
ops.add(ZKUtilOp.deleteNodeFailSilent(path2));
// fail -- node doesn't exist
ops.add(ZKUtilOp.deleteNodeFailSilent(path3));
// pass
ops.add(
ZKUtilOp.createAndFailSilent(path4, Bytes.add(Bytes.toBytes(path4), Bytes.toBytes(path4))));
ZKUtil.multiOrSequential(zkw, ops, true);
assertTrue(Bytes.equals(ZKUtil.getData(zkw, path1),
Bytes.add(Bytes.toBytes(path1), Bytes.toBytes(path1))));