HBASE-26922 Fix LineLength warnings as much as possible if it can not be fixed by spotless (#4324)

Signed-off-by: Yulin Niu <niuyulin@apache.org>
(cherry picked from commit 3ae0d9012c)
parent abb1bf2617
commit 913dd9c305
@@ -69,8 +69,8 @@ public class StreamSlowMonitor implements ConfigurationObserver {
 */
 private static final String DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY =
 "hbase.regionserver.async.wal.datanode.slow.check.speed.packet.data.length.min";
-private static final long DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH =
-64 * 1024; //64KB
+// 64KB
+private static final long DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH = 64 * 1024;

 /**
 * Configure for the slow packet process time, a duration from send to ACK.
@@ -79,7 +79,8 @@ public class StreamSlowMonitor implements ConfigurationObserver {
 */
 public static final String DATANODE_SLOW_PACKET_PROCESS_TIME_KEY =
 "hbase.regionserver.async.wal.datanode.slow.packet.process.time.millis";
-private static final long DEFAULT_DATANODE_SLOW_PACKET_PROCESS_TIME = 6000; // 6s in ms
+// 6s in ms
+private static final long DEFAULT_DATANODE_SLOW_PACKET_PROCESS_TIME = 6000;

 /**
 * Configure for the check of large packet(which is configured by
@@ -89,7 +90,8 @@ public class StreamSlowMonitor implements ConfigurationObserver {
 */
 private static final String DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED_KEY =
 "hbase.regionserver.async.wal.datanode.slow.packet.speed.min.kbs";
-private static final double DEFAULT_DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED = 20; // 20KB/s
+// 20KB/s
+private static final double DEFAULT_DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED = 20;

 private final String name;
 // this is a map of datanodeInfo->queued slow PacketAckData
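The three thresholds in the StreamSlowMonitor hunks above are ordinary Configuration keys, so they can be tuned per cluster. The sketch below is illustrative only (hypothetical values, standard Hadoop/HBase Configuration API assumed) and is not part of the patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SlowPacketTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flag a packet as slow when send-to-ACK takes more than 3s (default above: 6000 ms).
    conf.setLong("hbase.regionserver.async.wal.datanode.slow.packet.process.time.millis", 3000L);
    // Treat flush speeds below 10 KB/s as slow (default above: 20 KB/s).
    conf.setDouble("hbase.regionserver.async.wal.datanode.slow.packet.speed.min.kbs", 10.0);
    // Only apply the speed check to packets of at least 32 KB (default above: 64 KB).
    conf.setLong(
      "hbase.regionserver.async.wal.datanode.slow.check.speed.packet.data.length.min", 32 * 1024L);
  }
}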
@@ -103,8 +103,8 @@ class CatalogReplicaLoadBalanceSimpleSelector implements
 }
 }

-private final ConcurrentMap<TableName, ConcurrentNavigableMap<byte[], StaleLocationCacheEntry>>
-staleCache = new ConcurrentHashMap<>();
+private final ConcurrentMap<TableName,
+ConcurrentNavigableMap<byte[], StaleLocationCacheEntry>> staleCache = new ConcurrentHashMap<>();
 private volatile int numOfReplicas;
 private final ChoreService choreService;
 private final TableName tableName;
@@ -101,7 +101,6 @@ public class ColumnCountGetFilter extends FilterBase {
 /**
 * @param pbBytes A pb serialized {@link ColumnCountGetFilter} instance
 * @return An instance of {@link ColumnCountGetFilter} made from <code>bytes</code>
-* @throws org.apache.hadoop.hbase.exceptions.DeserializationException
 * @see #toByteArray
 */
 public static ColumnCountGetFilter parseFrom(final byte [] pbBytes)
@@ -241,8 +241,8 @@ public class FuzzyRowFilter extends FilterBase {

 byte[] nextRow() {
 if (nextRows.isEmpty()) {
-throw new IllegalStateException(
-"NextRows should not be empty, make sure to call nextRow() after updateTracker() return true");
+throw new IllegalStateException("NextRows should not be empty, "
++ "make sure to call nextRow() after updateTracker() return true");
 } else {
 return nextRows.peek().getFirst();
 }
@@ -399,7 +399,6 @@ public class SingleColumnValueFilter extends FilterBase {
 /**
 * @param pbBytes A pb serialized {@link SingleColumnValueFilter} instance
 * @return An instance of {@link SingleColumnValueFilter} made from <code>bytes</code>
-* @throws org.apache.hadoop.hbase.exceptions.DeserializationException
 * @see #toByteArray
 */
 public static SingleColumnValueFilter parseFrom(final byte [] pbBytes)
@@ -2958,9 +2958,7 @@ public final class ProtobufUtil {
 }

 /**
-* Creates {@link CompactionState} from
-* {@link org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState}
-* state
+* Creates {@link CompactionState} from {@link GetRegionInfoResponse.CompactionState} state
 * @param state the protobuf CompactionState
 * @return CompactionState
 */
@@ -2973,9 +2971,7 @@ public final class ProtobufUtil {
 }

 /**
-* Creates {@link CompactionState} from
-* {@link org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos
-* .RegionLoad.CompactionState} state
+* Creates {@link CompactionState} from {@link RegionLoad.CompactionState} state
 * @param state the protobuf CompactionState
 * @return CompactionState
 */
@@ -2995,8 +2991,7 @@ public final class ProtobufUtil {
 }

 /**
-* Creates {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type}
-* from {@link SnapshotType}
+* Creates {@link SnapshotProtos.SnapshotDescription.Type} from {@link SnapshotType}
 * @param type the SnapshotDescription type
 * @return the protobuf SnapshotDescription type
 */
@@ -3006,8 +3001,8 @@ public final class ProtobufUtil {
 }

 /**
-* Creates {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type}
-* from the type of SnapshotDescription string
+* Creates {@link SnapshotProtos.SnapshotDescription.Type} from the type of SnapshotDescription
+* string
 * @param snapshotDesc string representing the snapshot description type
 * @return the protobuf SnapshotDescription type
 */
@@ -3017,8 +3012,7 @@ public final class ProtobufUtil {
 }

 /**
-* Creates {@link SnapshotType} from the type of
-* {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription}
+* Creates {@link SnapshotType} from the {@link SnapshotProtos.SnapshotDescription.Type}
 * @param type the snapshot description type
 * @return the protobuf SnapshotDescription type
 */
@@ -3027,8 +3021,7 @@ public final class ProtobufUtil {
 }

 /**
-* Convert from {@link SnapshotDescription} to
-* {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription}
+* Convert from {@link SnapshotDescription} to {@link SnapshotProtos.SnapshotDescription}
 * @param snapshotDesc the POJO SnapshotDescription
 * @return the protobuf SnapshotDescription
 */
@@ -3062,9 +3055,7 @@ public final class ProtobufUtil {
 }

 /**
-* Convert from
-* {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription} to
-* {@link SnapshotDescription}
+* Convert from {@link SnapshotProtos.SnapshotDescription} to {@link SnapshotDescription}
 * @param snapshotDesc the protobuf SnapshotDescription
 * @return the POJO SnapshotDescription
 */
@@ -38,9 +38,9 @@ import org.apache.yetus.audience.InterfaceStability;
 * {@link BBKVComparator}. Use it when mostly {@link ByteBufferKeyValue}s.
 * </p>
 */
-@edu.umd.cs.findbugs.annotations.SuppressWarnings(
-value="UNKNOWN",
-justification="Findbugs doesn't like the way we are negating the result of a compare in below")
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "UNKNOWN",
+justification = "Findbugs doesn't like the way we are negating the result of"
++ " a compare in below")
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class CellComparatorImpl implements CellComparator {
@@ -73,13 +73,14 @@ public interface ExtendedCell extends RawCell, HeapSize {
 }

 /**
-* @param withTags Whether to write tags.
-* @return Bytes count required to serialize this Cell in a {@link KeyValue} format.
-* <br> KeyValue format <br>
+* KeyValue format
+* <p/>
 * <code><4 bytes keylength> <4 bytes valuelength> <2 bytes rowlength>
 * <row> <1 byte columnfamilylength> <columnfamily> <columnqualifier>
 * <8 bytes timestamp> <1 byte keytype> <value> <2 bytes tagslength>
 * <tags></code>
+* @param withTags Whether to write tags.
+* @return Bytes count required to serialize this Cell in a {@link KeyValue} format.
 */
 // TODO remove the boolean param once HBASE-16706 is done.
 default int getSerializedSize(boolean withTags) {
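The KeyValue layout quoted in the javadoc above implies a simple size formula. The sketch below only restates that arithmetic as a hypothetical helper with made-up field lengths; it is not code from this change:

// <4 bytes keylength> <4 bytes valuelength> <key> <value> [<2 bytes tagslength> <tags>]
// key = <2 bytes rowlength> <row> <1 byte columnfamilylength> <columnfamily> <columnqualifier>
//       <8 bytes timestamp> <1 byte keytype>
public final class KeyValueSizeSketch {
  static int serializedSize(int rowLen, int familyLen, int qualifierLen, int valueLen,
      int tagsLen, boolean withTags) {
    int keyLen = 2 + rowLen + 1 + familyLen + qualifierLen + 8 + 1;
    int size = 4 + 4 + keyLen + valueLen; // keylength + valuelength + key + value
    if (withTags && tagsLen > 0) {
      size += 2 + tagsLen; // tags length prefix plus the tag bytes themselves
    }
    return size;
  }

  public static void main(String[] args) {
    // Hypothetical cell: 3-byte row, 1-byte family, 4-byte qualifier, 10-byte value, no tags.
    System.out.println(serializedSize(3, 1, 4, 10, 0, false)); // prints 38
  }
}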
@@ -2369,21 +2369,23 @@ public class KeyValue implements ExtendedCell, Cloneable {

 /**
 * HeapSize implementation
-*
+* <p/>
 * We do not count the bytes in the rowCache because it should be empty for a KeyValue in the
 * MemStore.
 */
 @Override
 public long heapSize() {
-/*
-* Deep object overhead for this KV consists of two parts. The first part is the KV object
-* itself, while the second part is the backing byte[]. We will only count the array overhead
-* from the byte[] only if this is the first KV in there.
-*/
-return ClassSize.align(FIXED_OVERHEAD) +
-(offset == 0
-? ClassSize.sizeOfByteArray(length) // count both length and object overhead
-: length); // only count the number of bytes
+// Deep object overhead for this KV consists of two parts. The first part is the KV object
+// itself, while the second part is the backing byte[]. We will only count the array overhead
+// from the byte[] only if this is the first KV in there.
+int fixed = ClassSize.align(FIXED_OVERHEAD);
+if (offset == 0) {
+// count both length and object overhead
+return fixed + ClassSize.sizeOfByteArray(length);
+} else {
+// only count the number of bytes
+return fixed + length;
+}
 }

 /**
@@ -1589,11 +1589,13 @@ public final class PrivateCellUtil {
 }

 private static class FirstOnRowCell extends EmptyCell {
+// @formatter:off
 private static final int FIXED_HEAPSIZE =
 ClassSize.OBJECT // object
 + ClassSize.REFERENCE // row array
 + Bytes.SIZEOF_INT // row offset
 + Bytes.SIZEOF_SHORT; // row length
+// @formatter:on
 private final byte[] rowArray;
 private final int roffset;
 private final short rlength;
@@ -1643,11 +1645,13 @@ public final class PrivateCellUtil {
 }

 private static class FirstOnRowByteBufferExtendedCell extends EmptyByteBufferExtendedCell {
+// @formatter:off
 private static final int FIXED_OVERHEAD =
 ClassSize.OBJECT // object
 + ClassSize.REFERENCE // row buffer
 + Bytes.SIZEOF_INT // row offset
 + Bytes.SIZEOF_SHORT; // row length
+// @formatter:on
 private final ByteBuffer rowBuff;
 private final int roffset;
 private final short rlength;
@@ -1698,11 +1702,12 @@ public final class PrivateCellUtil {
 }

 private static class LastOnRowByteBufferExtendedCell extends EmptyByteBufferExtendedCell {
-private static final int FIXED_OVERHEAD =
-ClassSize.OBJECT // object
+// @formatter:off
+private static final int FIXED_OVERHEAD = ClassSize.OBJECT // object
 + ClassSize.REFERENCE // rowBuff
 + Bytes.SIZEOF_INT // roffset
 + Bytes.SIZEOF_SHORT; // rlength
+// @formatter:on
 private final ByteBuffer rowBuff;
 private final int roffset;
 private final short rlength;
@@ -1754,11 +1759,12 @@ public final class PrivateCellUtil {

 private static class FirstOnRowColByteBufferExtendedCell
 extends FirstOnRowByteBufferExtendedCell {
-private static final int FIXED_OVERHEAD =
-FirstOnRowByteBufferExtendedCell.FIXED_OVERHEAD
+// @formatter:off
+private static final int FIXED_OVERHEAD = FirstOnRowByteBufferExtendedCell.FIXED_OVERHEAD
 + ClassSize.REFERENCE * 2 // family buffer and column buffer
 + Bytes.SIZEOF_INT * 3 // famOffset, colOffset, colLength
 + Bytes.SIZEOF_BYTE; // famLength
+// @formatter:on
 private final ByteBuffer famBuff;
 private final int famOffset;
 private final byte famLength;
@@ -1823,11 +1829,12 @@ public final class PrivateCellUtil {
 }

 private static class FirstOnRowColCell extends FirstOnRowCell {
-private static final long FIXED_HEAPSIZE =
-FirstOnRowCell.FIXED_HEAPSIZE
+// @formatter:off
+private static final long FIXED_HEAPSIZE = FirstOnRowCell.FIXED_HEAPSIZE
 + Bytes.SIZEOF_BYTE // flength
 + Bytes.SIZEOF_INT * 3 // foffset, qoffset, qlength
 + ClassSize.REFERENCE * 2; // fArray, qArray
+// @formatter:on
 private final byte[] fArray;
 private final int foffset;
 private final byte flength;
@@ -1886,10 +1893,11 @@ public final class PrivateCellUtil {
 }

 private static class FirstOnRowColTSCell extends FirstOnRowColCell {
-private static final long FIXED_HEAPSIZE =
-FirstOnRowColCell.FIXED_HEAPSIZE
+// @formatter:off
+private static final long FIXED_HEAPSIZE = FirstOnRowColCell.FIXED_HEAPSIZE
 + Bytes.SIZEOF_LONG; // ts
 private long ts;
+// @formatter:on

 public FirstOnRowColTSCell(byte[] rArray, int roffset, short rlength, byte[] fArray,
 int foffset, byte flength, byte[] qArray, int qoffset, int qlength, long ts) {
@@ -1910,10 +1918,11 @@ public final class PrivateCellUtil {

 private static class FirstOnRowColTSByteBufferExtendedCell
 extends FirstOnRowColByteBufferExtendedCell {
-private static final int FIXED_OVERHEAD =
-FirstOnRowColByteBufferExtendedCell.FIXED_OVERHEAD
+// @formatter:off
+private static final int FIXED_OVERHEAD = FirstOnRowColByteBufferExtendedCell.FIXED_OVERHEAD
 + Bytes.SIZEOF_LONG; // ts
 private long ts;
+// @formatter:on

 public FirstOnRowColTSByteBufferExtendedCell(ByteBuffer rBuffer, int roffset, short rlength,
 ByteBuffer fBuffer, int foffset, byte flength, ByteBuffer qBuffer, int qoffset, int qlength,
@@ -1934,11 +1943,12 @@ public final class PrivateCellUtil {
 }

 private static class LastOnRowCell extends EmptyCell {
-private static final int FIXED_OVERHEAD =
-ClassSize.OBJECT // object
+// @formatter:off
+private static final int FIXED_OVERHEAD = ClassSize.OBJECT // object
 + ClassSize.REFERENCE // row array
 + Bytes.SIZEOF_INT // row offset
 + Bytes.SIZEOF_SHORT; // row length
+// @formatter:on
 private final byte[] rowArray;
 private final int roffset;
 private final short rlength;
@@ -1988,10 +1998,12 @@ public final class PrivateCellUtil {
 }

 private static class LastOnRowColCell extends LastOnRowCell {
+// @formatter:off
 private static final long FIXED_OVERHEAD = LastOnRowCell.FIXED_OVERHEAD
 + ClassSize.REFERENCE * 2 // fArray and qArray
 + Bytes.SIZEOF_INT * 3 // foffset, qoffset, qlength
 + Bytes.SIZEOF_BYTE; // flength
+// @formatter:on
 private final byte[] fArray;
 private final int foffset;
 private final byte flength;
@@ -2050,11 +2062,12 @@ public final class PrivateCellUtil {
 }

 private static class LastOnRowColByteBufferExtendedCell extends LastOnRowByteBufferExtendedCell {
-private static final int FIXED_OVERHEAD =
-LastOnRowByteBufferExtendedCell.FIXED_OVERHEAD
+// @formatter:off
+private static final int FIXED_OVERHEAD = LastOnRowByteBufferExtendedCell.FIXED_OVERHEAD
 + ClassSize.REFERENCE * 2 // fBuffer and qBuffer
 + Bytes.SIZEOF_INT * 3 // foffset, qoffset, qlength
 + Bytes.SIZEOF_BYTE; // flength
+// @formatter:on
 private final ByteBuffer fBuffer;
 private final int foffset;
 private final byte flength;
@@ -2119,11 +2132,12 @@ public final class PrivateCellUtil {
 }

 private static class FirstOnRowDeleteFamilyCell extends EmptyCell {
-private static final int FIXED_OVERHEAD =
-ClassSize.OBJECT // object
+// @formatter:off
+private static final int FIXED_OVERHEAD = ClassSize.OBJECT // object
 + ClassSize.REFERENCE * 2 // fBuffer and qBuffer
 + Bytes.SIZEOF_INT * 3 // foffset, qoffset, qlength
 + Bytes.SIZEOF_BYTE; // flength
+// @formatter:on
 private final byte[] row;
 private final byte[] fam;
@@ -164,17 +164,22 @@ public class CoprocessorClassLoader extends ClassLoaderBase {
 }

 FileSystem fs = pathPattern.getFileSystem(conf);
-Path pathPattern1 = fs.isDirectory(pathPattern) ?
-new Path(pathPattern, "*.jar") : pathPattern; // append "*.jar" if a directory is specified
-FileStatus[] fileStatuses = fs.globStatus(pathPattern1); // return all files that match the pattern
-if (fileStatuses == null || fileStatuses.length == 0) { // if no one matches
+// append "*.jar" if a directory is specified
+Path pathPattern1 = fs.isDirectory(pathPattern) ? new Path(pathPattern, "*.jar") : pathPattern;
+// return all files that match the pattern
+FileStatus[] fileStatuses = fs.globStatus(pathPattern1);
+if (fileStatuses == null || fileStatuses.length == 0) {
+// if no one matches
 throw new FileNotFoundException(pathPattern1.toString());
 } else {
 boolean validFileEncountered = false;
-for (Path path : FileUtil.stat2Paths(fileStatuses)) { // for each file that match the pattern
-if (fs.isFile(path)) { // only process files, skip for directories
-File dst = new File(parentDirStr, "." + pathPrefix + "."
-+ path.getName() + "." + EnvironmentEdgeManager.currentTime() + ".jar");
+// for each file that match the pattern
+for (Path path : FileUtil.stat2Paths(fileStatuses)) {
+if (fs.isFile(path)) {
+// only process files, skip for directories
+File dst = new File(parentDirStr,
+"." + pathPrefix + "." + path.getName() + "." + EnvironmentEdgeManager.currentTime()
++ ".jar");
 fs.copyToLocalFile(path, new Path(dst.toString()));
 dst.deleteOnExit();

@@ -182,7 +187,8 @@ public class CoprocessorClassLoader extends ClassLoaderBase {

 JarFile jarFile = new JarFile(dst.toString());
 try {
-Enumeration<JarEntry> entries = jarFile.entries(); // get entries inside a jar file
+// get entries inside a jar file
+Enumeration<JarEntry> entries = jarFile.entries();
 while (entries.hasMoreElements()) {
 JarEntry entry = entries.nextElement();
 Matcher m = libJarPattern.matcher(entry.getName());
@@ -200,11 +206,12 @@ public class CoprocessorClassLoader extends ClassLoaderBase {
 } finally {
 jarFile.close();
 }
-validFileEncountered = true; // Set to true when encountering a file
+// Set to true when encountering a file
+validFileEncountered = true;
 }
 }
-if (validFileEncountered == false) { // all items returned by globStatus() are directories
+if (validFileEncountered == false) {
+// all items returned by globStatus() are directories
 throw new FileNotFoundException("No file found matching " + pathPattern1.toString());
 }
 }
@@ -303,8 +303,8 @@ public class DemoClient {
 m = new Mutation();
 m.column = ByteBuffer.wrap(bytes("entry:sqr"));
 m.isDelete = true;
-client.mutateRowTs(demoTable, ByteBuffer.wrap(row), mutations, 1,
-dummyAttributes); // shouldn't override latest
+// shouldn't override latest
+client.mutateRowTs(demoTable, ByteBuffer.wrap(row), mutations, 1, dummyAttributes);
 printRow(client.getRow(demoTable, ByteBuffer.wrap(row), dummyAttributes));

 List<TCell> versions = client.getVer(demoTable, ByteBuffer.wrap(row),
@@ -483,8 +483,8 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo
 String HEDGED_READ_WINS_DESC =
 "The number of times we started a hedged read and a hedged read won";
 String HEDGED_READ_IN_CUR_THREAD = "hedgedReadOpsInCurThread";
-String HEDGED_READ_IN_CUR_THREAD_DESC =
-"The number of times we execute a hedged read in current thread as a fallback for task rejection";
+String HEDGED_READ_IN_CUR_THREAD_DESC = "The number of times we execute a hedged read"
++ " in current thread as a fallback for task rejection";

 String TOTAL_BYTES_READ = "totalBytesRead";
 String TOTAL_BYTES_READ_DESC = "The total number of bytes read from HDFS";
@@ -86,21 +86,20 @@ public class IntegrationTestIngestWithACL extends IntegrationTestIngest {
 tmp.add(sb.toString());
 return tmp.toArray(new String[tmp.size()]);
 }

 @Override
 protected void addOptions() {
 super.addOptions();
-super.addOptWithArg(OPT_SUPERUSER,
-"Super user name used to add the ACL permissions");
+super.addOptWithArg(OPT_SUPERUSER, "Super user name used to add the ACL permissions");
 super.addOptWithArg(OPT_USERS,
 "List of users to be added with the ACLs. Should be comma seperated.");
-super
-.addOptWithArg(
-OPT_AUTHN,
-"The name of the properties file that contains kerberos key tab file and principal definitions. " +
-"The principal key in the file should be of the form hbase.<username>.kerberos.principal." +
-" The keytab key in the file should be of the form hbase.<username>.keytab.file. Example: " +
-"hbase.user1.kerberos.principal=user1/fully.qualified.domain.name@YOUR-REALM.COM, " +
-"hbase.user1.keytab.file=<filelocation>.");
+super.addOptWithArg(OPT_AUTHN,
+"The name of the properties file that contains"
++ " kerberos key tab file and principal definitions. The principal key in the file"
++ " should be of the form hbase.<username>.kerberos.principal. The keytab key in the"
++ " file should be of the form hbase.<username>.keytab.file. Example:"
++ " hbase.user1.kerberos.principal=user1/fully.qualified.domain.name@YOUR-REALM.COM,"
++ " hbase.user1.keytab.file=<filelocation>.");
 }

 @Override
@@ -49,11 +49,12 @@ public class ServerAndDependenciesKillingMonkeyFactory extends MonkeyFactory {
 loadProperties();

 // Destructive actions to mess things around. Cannot run batch restart.
+// @formatter:off
 Action[] actions1 = new Action[] {
 new RestartRandomRsExceptMetaAction(60000),
 new RestartActiveMasterAction(5000),
-new RollingBatchRestartRsAction(5000, 1.0f, 2,
-true), // only allow 2 servers to be dead.
+// only allow 2 servers to be dead.
+new RollingBatchRestartRsAction(5000, 1.0f, 2, true),
 new ForceBalancerAction(),
 new RestartRandomDataNodeAction(60000),
 new RestartRandomZKNodeAction(60000),
@@ -61,6 +62,7 @@ public class ServerAndDependenciesKillingMonkeyFactory extends MonkeyFactory {
 new RollingBatchSuspendResumeRsAction(rollingBatchSuspendRSSleepTime,
 rollingBatchSuspendtRSRatio)
 };
+// @formatter:on

 // Action to log more info for debugging
 Action[] actions2 = new Action[]{
@@ -47,16 +47,18 @@ public class ServerKillingMonkeyFactory extends MonkeyFactory {
 loadProperties();

 // Destructive actions to mess things around. Cannot run batch restart
+// @formatter:off
 Action[] actions1 = new Action[] {
 new RestartRandomRsExceptMetaAction(60000),
 new RestartActiveMasterAction(5000),
-new RollingBatchRestartRsAction(5000, 1.0f, 2,
-true), //only allow 2 servers to be dead
+// only allow 2 servers to be dead
+new RollingBatchRestartRsAction(5000, 1.0f, 2, true),
 new ForceBalancerAction(),
 new GracefulRollingRestartRsAction(gracefulRollingRestartTSSLeepTime),
 new RollingBatchSuspendResumeRsAction(rollingBatchSuspendRSSleepTime,
 rollingBatchSuspendtRSRatio)
 };
+// @formatter:on

 // Action to log more info for debugging
 Action[] actions2 = new Action[] {
@@ -42,7 +42,7 @@ import org.apache.hadoop.mapred.Reporter;
 * A Base for {@link TableInputFormat}s. Receives a {@link Table}, a
 * byte[] of input columns and optionally a {@link Filter}.
 * Subclasses may use other TableRecordReader implementations.
-*
+* <p/>
 * Subclasses MUST ensure initializeTable(Connection, TableName) is called for an instance to
 * function properly. Each of the entry points to this class used by the MapReduce framework,
 * {@link #getRecordReader(InputSplit, JobConf, Reporter)} and {@link #getSplits(JobConf, int)},
@@ -98,8 +98,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
 * Builds a TableRecordReader. If no TableRecordReader was provided, uses
 * the default.
 *
-* @see org.apache.hadoop.mapred.InputFormat#getRecordReader(InputSplit,
-* JobConf, Reporter)
+* @see InputFormat#getRecordReader(InputSplit, JobConf, Reporter)
 */
 public RecordReader<ImmutableBytesWritable, Result> getRecordReader(
 InputSplit split, JobConf job, Reporter reporter)
@@ -165,7 +164,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {

 /**
 * Calculates the splits that will serve as input for the map tasks.
-*
+* <p/>
 * Splits are created in number equal to the smallest between numSplits and
 * the number of {@link org.apache.hadoop.hbase.regionserver.HRegion}s in the table.
 * If the number of splits is smaller than the number of
@@ -180,7 +179,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
 *
 * @return the input splits
 *
-* @see org.apache.hadoop.mapred.InputFormat#getSplits(org.apache.hadoop.mapred.JobConf, int)
+* @see InputFormat#getSplits(org.apache.hadoop.mapred.JobConf, int)
 */
 public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
 if (this.table == null) {
@@ -282,7 +281,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
 * will call {@link #initialize(JobConf)} as a convenient centralized location to handle
 * retrieving the necessary configuration information and calling
 * {@link #initializeTable(Connection, TableName)}.
-*
+* <p/>
 * Subclasses should implement their initialize call such that it is safe to call multiple times.
 * The current TableInputFormatBase implementation relies on a non-null table reference to decide
 * if an initialize call is needed, but this behavior may change in the future. In particular,
@@ -72,9 +72,7 @@ public abstract class MultiTableInputFormatBase extends
 * @return The newly created record reader.
 * @throws IOException When creating the reader fails.
 * @throws InterruptedException when record reader initialization fails
-* @see org.apache.hadoop.mapreduce.InputFormat#createRecordReader(
-* org.apache.hadoop.mapreduce.InputSplit,
-* org.apache.hadoop.mapreduce.TaskAttemptContext)
+* @see InputFormat#createRecordReader(InputSplit, TaskAttemptContext)
 */
 @Override
 public RecordReader<ImmutableBytesWritable, Result> createRecordReader(
@@ -152,7 +150,7 @@ public abstract class MultiTableInputFormatBase extends
 * @param context The current job context.
 * @return The list of input splits.
 * @throws IOException When creating the list of splits fails.
-* @see org.apache.hadoop.mapreduce.InputFormat#getSplits(org.apache.hadoop.mapreduce.JobContext)
+* @see InputFormat#getSplits(org.apache.hadoop.mapreduce.JobContext)
 */
 @Override
 public List<InputSplit> getSplits(JobContext context) throws IOException {
@@ -49,8 +49,7 @@ import java.util.Map;
 * included in each snapshot/scan
 * pair.
 * {@link TableMapReduceUtil#initMultiTableSnapshotMapperJob
-* (java.util.Map, Class, Class, Class, org.apache.hadoop.mapreduce.Job, boolean, org.apache
-* .hadoop.fs.Path)}
+* (Map, Class, Class, Class, org.apache.hadoop.mapreduce.Job, boolean, Path)}
 * can be used to configure the job.
 * <pre>{@code
 * Job job = new Job(conf);
@@ -78,8 +78,7 @@ public class MultiTableSnapshotInputFormatImpl {

 /**
 * Return the list of splits extracted from the scans/snapshots pushed to conf by
-* {@link
-* #setInput(org.apache.hadoop.conf.Configuration, java.util.Map, org.apache.hadoop.fs.Path)}
+* {@link #setInput(Configuration, Map, Path)}
 *
 * @param conf Configuration to determine splits from
 * @return Return the list of splits extracted from the scans/snapshots pushed to conf
@@ -115,7 +114,7 @@ public class MultiTableSnapshotInputFormatImpl {

 /**
 * Retrieve the snapshot name -> list<scan> mapping pushed to configuration by
-* {@link #setSnapshotToScans(org.apache.hadoop.conf.Configuration, java.util.Map)}
+* {@link #setSnapshotToScans(Configuration, Map)}
 *
 * @param conf Configuration to extract name -> list<scan> mappings from.
 * @return the snapshot name -> list<scan> mapping pushed to configuration
@@ -232,7 +232,8 @@ public class MultithreadedTableMapper<K2, V2> extends TableMapper<K2, V2> {
 }

 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "REC_CATCH_EXCEPTION",
-justification="Don't understand why FB is complaining about this one. We do throw exception")
+justification = "Don't understand why FB is complaining about this one."
++ " We do throw exception")
 private class MapRunner implements Runnable {
 private Mapper<ImmutableBytesWritable, Result, K2,V2> mapper;
 private Context subcontext;
@@ -65,10 +65,10 @@ import org.slf4j.LoggerFactory;


 /**
-* Scans a given table + CF for all mob reference cells to get the list of backing mob files.
-* For each referenced file we attempt to verify that said file is on the FileSystem in a place
-* that the MOB system will look when attempting to resolve the actual value.
-*
+* Scans a given table + CF for all mob reference cells to get the list of backing mob files. For
+* each referenced file we attempt to verify that said file is on the FileSystem in a place that the
+* MOB system will look when attempting to resolve the actual value.
+* <p/>
 * The job includes counters that can help provide a rough sketch of the mob data.
 *
 * <pre>
@@ -94,31 +94,31 @@ import org.slf4j.LoggerFactory;
 * Number of rows with total size in the 100,000s of bytes=6838
 * Number of rows with total size in the 1,000,000s of bytes=3162
 * </pre>
-*
-* * Map-Reduce Framework:Map input records - the number of rows with mob references
-* * Map-Reduce Framework:Reduce output records - the number of unique hfiles referenced
-* * MOB:NUM_CELLS - the total number of mob reference cells
-* * PROBLEM:Affected rows - the number of rows that reference hfiles with an issue
-* * PROBLEM:Problem MOB files - the number of unique hfiles that have an issue
-* * CELLS PER ROW: - this counter group gives a histogram of the order of magnitude of the
-* number of cells in a given row by grouping by the number of digits used in each count.
-* This allows us to see more about the distribution of cells than what we can determine
-* with just the cell count and the row count. In this particular example we can see that
-* all of our rows have somewhere between 1 - 9 cells.
-* * ROWS WITH PROBLEMS PER FILE: - this counter group gives a histogram of the order of
-* magnitude of the number of rows in each of the hfiles with a problem. e.g. in the
-* example there are 2 hfiles and they each have the same order of magnitude number of rows,
-* specifically between 100 and 999.
-* * SIZES OF CELLS: - this counter group gives a histogram of the order of magnitude of
-* the size of mob values according to our reference cells. e.g. in the example above we
-* have cell sizes that are all between 10,000 bytes and 9,999,999 bytes. From this
-* histogram we can also see that _most_ cells are 100,000 - 999,000 bytes and the smaller
-* and bigger ones are outliers making up less than 2% of mob cells.
-* * SIZES OF ROWS: - this counter group gives a histogram of the order of magnitude of the
-* size of mob values across each row according to our reference cells. In the example above
-* we have rows that are are between 100,000 bytes and 9,999,999 bytes. We can also see that
-* about 2/3rd of our rows are 100,000 - 999,999 bytes.
-*
+* <ol>
+* <li>Map-Reduce Framework:Map input records - the number of rows with mob references</li>
+* <li>Map-Reduce Framework:Reduce output records - the number of unique hfiles referenced</li>
+* <li>MOB:NUM_CELLS - the total number of mob reference cells</li>
+* <li>PROBLEM:Affected rows - the number of rows that reference hfiles with an issue</li>
+* <li>PROBLEM:Problem MOB files - the number of unique hfiles that have an issue</li>
+* <li>CELLS PER ROW: - this counter group gives a histogram of the order of magnitude of the number
+* of cells in a given row by grouping by the number of digits used in each count. This allows us to
+* see more about the distribution of cells than what we can determine with just the cell count and
+* the row count. In this particular example we can see that all of our rows have somewhere between
+* 1 - 9 cells.</li>
+* <li>ROWS WITH PROBLEMS PER FILE: - this counter group gives a histogram of the order of magnitude
+* of the number of rows in each of the hfiles with a problem. e.g. in the example there are 2
+* hfiles and they each have the same order of magnitude number of rows, specifically between 100
+* and 999.</li>
+* <li>SIZES OF CELLS: - this counter group gives a histogram of the order of magnitude of the size
+* of mob values according to our reference cells. e.g. in the example above we have cell sizes that
+* are all between 10,000 bytes and 9,999,999 bytes. From this histogram we can also see that _most_
+* cells are 100,000 - 999,000 bytes and the smaller and bigger ones are outliers making up less
+* than 2% of mob cells.</li>
+* <li>SIZES OF ROWS: - this counter group gives a histogram of the order of magnitude of the size
+* of mob values across each row according to our reference cells. In the example above we have rows
+* that are are between 100,000 bytes and 9,999,999 bytes. We can also see that about 2/3rd of our
+* rows are 100,000 - 999,999 bytes.</li>
+* </ol>
 * Generates a report that gives one file status per line, with tabs dividing fields.
 *
 * <pre>
@@ -133,32 +133,31 @@ import org.slf4j.LoggerFactory;
 * </pre>
 *
 * Possible results are listed; the first three indicate things are working properly.
-* * MOB DIR - the reference is in the normal MOB area for the given table and CF
-* * HLINK TO ARCHIVE FOR SAME TABLE - the reference is present in the archive area for this
-* table and CF
-* * HLINK TO ARCHIVE FOR OTHER TABLE - the reference is present in a different table and CF,
-* either in the MOB or archive areas (e.g. from a snapshot restore or clone)
-* * ARCHIVE WITH HLINK BUT NOT FROM OUR TABLE - the reference is currently present in the archive
-* area for this table and CF, but it is kept there because a _different_ table has a
-* reference to it (e.g. from a snapshot clone). If these other tables are removed then
-* the file will likely be deleted unless there is a snapshot also referencing it.
-* * ARCHIVE BUT NO HLINKS - the reference is currently present in the archive for this table and
-* CF, but there are no references present to prevent its removal. Unless it is newer than
-* the general TTL (default 5 minutes) or referenced in a snapshot it will be subject to
-* cleaning.
-* * ARCHIVE BUT FAILURE WHILE CHECKING HLINKS - Check the job logs to see why things failed while
-* looking for why this file is being kept around.
-* * MISSING FILE - We couldn't find the reference on the FileSystem. Either there is dataloss due
-* to a bug in the MOB storage system or the MOB storage is damaged but in an edge case that
-* allows it to work for now. You can verify which by doing a raw reference scan to get the
-* referenced hfile and check the underlying filesystem. See the ref guide section on mob
-* for details.
-* * HLINK BUT POINT TO MISSING FILE - There is a pointer in our mob area for this table and CF
-* to a file elsewhere on the FileSystem, however the file it points to no longer exists.
-* * MISSING FILE BUT FAILURE WHILE CHECKING HLINKS - We could not find the referenced file,
-* however you should check the job logs to see why we couldn't check to see if there is a
-* pointer to the referenced file in our archive or another table's archive or mob area.
-*
+* <ol>
+* <li>MOB DIR - the reference is in the normal MOB area for the given table and CF</li>
+* <li>HLINK TO ARCHIVE FOR SAME TABLE - the reference is present in the archive area for this table
+* and CF</li>
+* <li>HLINK TO ARCHIVE FOR OTHER TABLE - the reference is present in a different table and CF,
+* either in the MOB or archive areas (e.g. from a snapshot restore or clone)</li>
+* <li>ARCHIVE WITH HLINK BUT NOT FROM OUR TABLE - the reference is currently present in the archive
+* area for this table and CF, but it is kept there because a _different_ table has a reference to
+* it (e.g. from a snapshot clone). If these other tables are removed then the file will likely be
+* deleted unless there is a snapshot also referencing it.</li>
+* <li>ARCHIVE BUT NO HLINKS - the reference is currently present in the archive for this table and
+* CF, but there are no references present to prevent its removal. Unless it is newer than the
+* general TTL (default 5 minutes) or referenced in a snapshot it will be subject to cleaning.</li>
+* <li>ARCHIVE BUT FAILURE WHILE CHECKING HLINKS - Check the job logs to see why things failed while
+* looking for why this file is being kept around.</li>
+* <li>MISSING FILE - We couldn't find the reference on the FileSystem. Either there is dataloss due
+* to a bug in the MOB storage system or the MOB storage is damaged but in an edge case that allows
+* it to work for now. You can verify which by doing a raw reference scan to get the referenced
+* hfile and check the underlying filesystem. See the ref guide section on mob for details.</li>
+* <li>HLINK BUT POINT TO MISSING FILE - There is a pointer in our mob area for this table and CF to
+* a file elsewhere on the FileSystem, however the file it points to no longer exists.</li>
+* <li>MISSING FILE BUT FAILURE WHILE CHECKING HLINKS - We could not find the referenced file,
+* however you should check the job logs to see why we couldn't check to see if there is a pointer
+* to the referenced file in our archive or another table's archive or mob area.</li>
+* </ol>
 */
 @InterfaceAudience.Private
 public class MobRefReporter extends Configured implements Tool {
@@ -298,14 +298,15 @@ public class TestImportExport {
 IMPORT_TABLE, FQ_OUTPUT_DIR
 };
 assertTrue(runImport(args));
-/* exportedTableIn94Format contains 5 rows
-ROW COLUMN+CELL
-r1 column=f1:c1, timestamp=1383766761171, value=val1
-r2 column=f1:c1, timestamp=1383766771642, value=val2
-r3 column=f1:c1, timestamp=1383766777615, value=val3
-r4 column=f1:c1, timestamp=1383766785146, value=val4
-r5 column=f1:c1, timestamp=1383766791506, value=val5
-*/
+// @formatter:off
+// exportedTableIn94Format contains 5 rows
+// ROW COLUMN+CELL
+// r1 column=f1:c1, timestamp=1383766761171, value=val1
+// r2 column=f1:c1, timestamp=1383766771642, value=val2
+// r3 column=f1:c1, timestamp=1383766777615, value=val3
+// r4 column=f1:c1, timestamp=1383766785146, value=val4
+// r5 column=f1:c1, timestamp=1383766791506, value=val5
+// @formatter:on
 assertEquals(5, UTIL.countRows(t));
 }
 }
@@ -331,12 +332,9 @@ public class TestImportExport {
 p.addColumn(FAMILYA, QUAL, now + 3, QUAL);
 p.addColumn(FAMILYA, QUAL, now + 4, QUAL);
 t.put(p);
-String[] args = new String[] {
-"-D" + ExportUtils.EXPORT_BATCHING + "=" + EXPORT_BATCH_SIZE, // added scanner batching arg.
-name.getMethodName(),
-FQ_OUTPUT_DIR
-};
+// added scanner batching arg.
+String[] args = new String[] { "-D" + ExportUtils.EXPORT_BATCHING + "=" + EXPORT_BATCH_SIZE,
+name.getMethodName(), FQ_OUTPUT_DIR };
 assertTrue(runExport(args));

 FileSystem fs = FileSystem.get(UTIL.getConfiguration());
@@ -48,8 +48,9 @@ public class TestRowModel extends TestModelBase<RowModel> {
 public TestRowModel() throws Exception {
 super(RowModel.class);
 AS_XML =
-"<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><Row key=\"dGVzdHJvdzE=\">" +
-"<Cell column=\"dGVzdGNvbHVtbjE=\" timestamp=\"1245219839331\">dGVzdHZhbHVlMQ==</Cell></Row>";
+"<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>" + "<Row key=\"dGVzdHJvdzE=\">"
++ "<Cell column=\"dGVzdGNvbHVtbjE=\" timestamp=\"1245219839331\">dGVzdHZhbHVlMQ==</Cell>"
++ "</Row>";

 AS_JSON =
 "{\"key\":\"dGVzdHJvdzE=\",\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjE=\"," +
@@ -157,7 +157,6 @@ public class ForeignException extends IOException {
 * @param bytes
 * @return the ForeignExcpetion instance
 * @throws InvalidProtocolBufferException if there was deserialization problem this is thrown.
-* @throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException
 */
 public static ForeignException deserialize(byte[] bytes)
 throws IOException {
@@ -534,13 +534,13 @@ public final class HFile {
   /**
    * @param fs filesystem
    * @param path Path to file to read
-   * @param cacheConf This must not be null. @see
-   *   {@link org.apache.hadoop.hbase.io.hfile.CacheConfig#CacheConfig(Configuration)}
+   * @param cacheConf This must not be null.
    * @param primaryReplicaReader true if this is a reader for primary replica
    * @param conf Configuration
    * @return an active Reader instance
    * @throws IOException Will throw a CorruptHFileException (DoNotRetryIOException subtype) if hfile
    *   is corrupt/invalid.
+   * @see CacheConfig#CacheConfig(Configuration)
    */
   public static Reader createReader(FileSystem fs, Path path, CacheConfig cacheConf,
     boolean primaryReplicaReader, Configuration conf) throws IOException {
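Note on the HFile hunk: the inline "@see" that sat inside the @param description is replaced with a dedicated @see tag, which is the form the javadoc tool actually renders as a cross reference. A hedged, self-contained sketch of the same tag layout on a made-up utility (the method and types below are illustrative, not the real HFile API):

import java.time.Duration;

/** Illustrative only; shows a dedicated @see tag instead of "@see" embedded in @param text. */
final class Timeouts {
  private Timeouts() {
  }

  /**
   * Converts a millisecond timeout into a Duration.
   * @param millis timeout in milliseconds; must be non-negative
   * @return the equivalent Duration
   * @see Duration#ofMillis(long)
   */
  static Duration toDuration(long millis) {
    return Duration.ofMillis(millis);
  }
}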
@@ -149,11 +149,10 @@ public class HMasterCommandLine extends ServerCommandLine {
       if (shutDownCluster) {
         return stopMaster();
       }
-      System.err.println(
-        "To shutdown the master run " +
-        "hbase-daemon.sh stop master or send a kill signal to " +
-        "the HMaster pid, " +
-        "and to stop HBase Cluster run \"stop-hbase.sh\" or \"hbase master stop --shutDownCluster\"");
+      System.err.println("To shutdown the master run "
+        + "hbase-daemon.sh stop master or send a kill signal to the HMaster pid, "
+        + "and to stop HBase Cluster run \"stop-hbase.sh\" or \"hbase master "
+        + "stop --shutDownCluster\"");
       return 1;
     } else if ("clear".equals(command)) {
       return (ZNodeClearer.clear(getConf()) ? 0 : 1);
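The message above is unchanged; it is only re-split so each source line stays under the line-length limit. Because every operand is a string literal, javac folds the concatenation into a single constant at compile time, so the split has no runtime cost. A small illustrative sketch (class and constant names are made up):

public class LongMessageExample {
  // One logical message, split purely for line length; the compiler folds the
  // constant concatenation, so only a single String ends up in the class file.
  private static final String USAGE = "To shutdown the master run "
    + "hbase-daemon.sh stop master or send a kill signal to the HMaster pid, "
    + "and to stop HBase Cluster run \"stop-hbase.sh\"";

  public static void main(String[] args) {
    System.err.println(USAGE);
  }
}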
@@ -293,9 +293,9 @@ public class MasterWalManager {
     splitLog(serverNames, META_FILTER);
   }

-  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="UL_UNRELEASED_LOCK", justification=
-    "We only release this lock when we set it. Updates to code that uses it should verify use " +
-    "of the guard boolean.")
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "UL_UNRELEASED_LOCK",
+    justification = "We only release this lock when we set it. Updates to code "
+      + "that uses it should verify use of the guard boolean.")
   List<Path> getLogDirs(final Set<ServerName> serverNames) throws IOException {
     List<Path> logDirs = new ArrayList<>();
     boolean needReleaseLock = false;
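For reference, the suppression keeps its `value` and `justification` elements and is only re-wrapped. A hedged sketch of how such an annotation is typically attached; it assumes the findbugs-annotations jar on the classpath, and the lock field, guard boolean, and method body are illustrative rather than HBase code:

import java.util.concurrent.locks.ReentrantLock;

public class GuardedResource {
  private final ReentrantLock splitLogLock = new ReentrantLock();
  private boolean needReleaseLock = false;

  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "UL_UNRELEASED_LOCK",
    justification = "The lock is released by the code path that set the guard boolean; "
      + "check the guard field before changing this method.")
  void doWorkHoldingLock() {
    // Intentionally leaves the lock held; the guard boolean records that fact,
    // which is exactly the situation the suppression documents.
    splitLogLock.lock();
    needReleaseLock = true;
  }
}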
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hbase.master;

 import static org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective.CHECK;
 import static org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective.FORCE;
 import static org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus.DELETED;
@@ -155,10 +156,10 @@ public class SplitLogManager {
   /**
    * Get a list of paths that need to be split given a set of server-specific directories and
    * optionally a filter.
-   *
+   * <p/>
    * See {@link AbstractFSWALProvider#getServerNameFromWALDirectoryName} for more info on directory
    * layout.
-   *
+   * <p/>
    * Should be package-private, but is needed by
    * {@link org.apache.hadoop.hbase.wal.WALSplitter#split(Path, Path, Path, FileSystem,
    * Configuration, org.apache.hadoop.hbase.wal.WALFactory)} for tests.
@@ -274,11 +274,11 @@ class SimpleRegionNormalizer implements RegionNormalizer, ConfigurationObserver
   }

   /**
+   * Also make sure tableRegions contains regions of the same table
    * @param tableRegions regions of table to normalize
    * @param tableDescriptor the TableDescriptor
    * @return average region size depending on
-   * @see org.apache.hadoop.hbase.client.TableDescriptor#getNormalizerTargetRegionCount()
-   * Also make sure tableRegions contains regions of the same table
+   * @see TableDescriptor#getNormalizerTargetRegionCount()
    */
   private double getAverageRegionSizeMb(final List<RegionInfo> tableRegions,
     final TableDescriptor tableDescriptor) {
@@ -64,8 +64,9 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;

 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CloneSnapshotState;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;

 @InterfaceAudience.Private
@@ -310,8 +311,7 @@ public class CloneSnapshotProcedure
     throws IOException {
     super.serializeStateData(serializer);

-    MasterProcedureProtos.CloneSnapshotStateData.Builder cloneSnapshotMsg =
-      MasterProcedureProtos.CloneSnapshotStateData.newBuilder()
+    CloneSnapshotStateData.Builder cloneSnapshotMsg = CloneSnapshotStateData.newBuilder()
       .setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
       .setSnapshot(this.snapshot)
       .setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor));
@@ -328,8 +328,8 @@ public class CloneSnapshotProcedure
     while (it.hasNext()) {
       final Map.Entry<String, Pair<String, String>> entry = it.next();

-      MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder parentToChildrenPair =
-        MasterProcedureProtos.RestoreParentToChildRegionsPair.newBuilder()
+      RestoreParentToChildRegionsPair.Builder parentToChildrenPair =
+        RestoreParentToChildRegionsPair.newBuilder()
         .setParentRegionName(entry.getKey())
         .setChild1RegionName(entry.getValue().getFirst())
         .setChild2RegionName(entry.getValue().getSecond());
@@ -347,8 +347,7 @@ public class CloneSnapshotProcedure
     throws IOException {
     super.deserializeStateData(serializer);

-    MasterProcedureProtos.CloneSnapshotStateData cloneSnapshotMsg =
-      serializer.deserialize(MasterProcedureProtos.CloneSnapshotStateData.class);
+    CloneSnapshotStateData cloneSnapshotMsg = serializer.deserialize(CloneSnapshotStateData.class);
     setUser(MasterProcedureUtil.toUserInfo(cloneSnapshotMsg.getUserInfo()));
     snapshot = cloneSnapshotMsg.getSnapshot();
     tableDescriptor = ProtobufUtil.toTableDescriptor(cloneSnapshotMsg.getTableSchema());
@@ -365,8 +364,8 @@ public class CloneSnapshotProcedure
     }
     if (cloneSnapshotMsg.getParentToChildRegionsPairListCount() > 0) {
       parentsToChildrenPairMap = new HashMap<>();
-      for (MasterProcedureProtos.RestoreParentToChildRegionsPair parentToChildrenPair:
-        cloneSnapshotMsg.getParentToChildRegionsPairListList()) {
+      for (RestoreParentToChildRegionsPair parentToChildrenPair : cloneSnapshotMsg
+        .getParentToChildRegionsPairListList()) {
         parentsToChildrenPairMap.put(
           parentToChildrenPair.getParentRegionName(),
           new Pair<>(
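The CloneSnapshotProcedure hunks above all use the same trick: the nested generated message types (for example MasterProcedureProtos.CloneSnapshotStateData) are imported directly, so call sites no longer spell the outer class and fit on one line. A hedged illustration of importing a nested class, shown on a JDK type so it stands alone:

// Importing a nested type directly keeps call sites short without changing behaviour.
import java.util.AbstractMap.SimpleEntry;
import java.util.Map;

public class NestedImportExample {
  public static void main(String[] args) {
    // Without the import this would read java.util.AbstractMap.SimpleEntry<...>
    Map.Entry<String, Integer> entry = new SimpleEntry<>("snapshot", 1);
    System.out.println(entry.getKey() + " -> " + entry.getValue());
  }
}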
@@ -58,8 +58,9 @@ import org.slf4j.LoggerFactory;

 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RestoreSnapshotState;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;

 @InterfaceAudience.Private
@@ -239,8 +240,7 @@ public class RestoreSnapshotProcedure
     throws IOException {
     super.serializeStateData(serializer);

-    MasterProcedureProtos.RestoreSnapshotStateData.Builder restoreSnapshotMsg =
-      MasterProcedureProtos.RestoreSnapshotStateData.newBuilder()
+    RestoreSnapshotStateData.Builder restoreSnapshotMsg = RestoreSnapshotStateData.newBuilder()
       .setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
       .setSnapshot(this.snapshot)
       .setModifiedTableSchema(ProtobufUtil.toTableSchema(modifiedTableDescriptor));
@@ -266,8 +266,8 @@ public class RestoreSnapshotProcedure
     while (it.hasNext()) {
       final Map.Entry<String, Pair<String, String>> entry = it.next();

-      MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder parentToChildrenPair =
-        MasterProcedureProtos.RestoreParentToChildRegionsPair.newBuilder()
+      RestoreParentToChildRegionsPair.Builder parentToChildrenPair =
+        RestoreParentToChildRegionsPair.newBuilder()
         .setParentRegionName(entry.getKey())
         .setChild1RegionName(entry.getValue().getFirst())
         .setChild2RegionName(entry.getValue().getSecond());
@@ -283,8 +283,8 @@ public class RestoreSnapshotProcedure
     throws IOException {
     super.deserializeStateData(serializer);

-    MasterProcedureProtos.RestoreSnapshotStateData restoreSnapshotMsg =
-      serializer.deserialize(MasterProcedureProtos.RestoreSnapshotStateData.class);
+    RestoreSnapshotStateData restoreSnapshotMsg =
+      serializer.deserialize(RestoreSnapshotStateData.class);
     setUser(MasterProcedureUtil.toUserInfo(restoreSnapshotMsg.getUserInfo()));
     snapshot = restoreSnapshotMsg.getSnapshot();
     modifiedTableDescriptor =
@@ -315,8 +315,8 @@ public class RestoreSnapshotProcedure
       }
     }
     if (restoreSnapshotMsg.getParentToChildRegionsPairListCount() > 0) {
-      for (MasterProcedureProtos.RestoreParentToChildRegionsPair parentToChildrenPair:
-        restoreSnapshotMsg.getParentToChildRegionsPairListList()) {
+      for (RestoreParentToChildRegionsPair parentToChildrenPair : restoreSnapshotMsg
+        .getParentToChildRegionsPairListList()) {
         parentsToChildrenPairMap.put(
           parentToChildrenPair.getParentRegionName(),
           new Pair<>(
@@ -79,8 +79,8 @@ public class BalancerRejectionQueueService implements NamedQueueService {
       return;
     }
     if (!(namedQueuePayload instanceof BalancerRejectionDetails)) {
-      LOG.warn(
-        "BalancerRejectionQueueService: NamedQueuePayload is not of type BalancerRejectionDetails.");
+      LOG.warn("BalancerRejectionQueueService: NamedQueuePayload is not of type"
+        + " BalancerRejectionDetails.");
       return;
     }
     BalancerRejectionDetails balancerRejectionDetails = (BalancerRejectionDetails) namedQueuePayload;
@@ -35,8 +35,9 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "IS2_INCONSISTENT_SYNC",
-  justification="FindBugs seems confused; says bypassGlobals, namepaceLimiters, and " +
-    "tableLimiters are mostly synchronized...but to me it looks like they are totally synchronized")
+  justification = "FindBugs seems confused; says bypassGlobals, namepaceLimiters, and "
+    + "tableLimiters are mostly synchronized..."
+    + "but to me it looks like they are totally synchronized")
 public class UserQuotaState extends QuotaState {
   private Map<String, QuotaLimiter> namespaceLimiters = null;
   private Map<TableName, QuotaLimiter> tableLimiters = null;
@@ -55,9 +55,11 @@ public abstract class AbstractMemStore implements MemStore {

   protected RegionServicesForStores regionServices;

+  // @formatter:off
   public final static long FIXED_OVERHEAD = (long) ClassSize.OBJECT
     + (5 * ClassSize.REFERENCE)
     + (2 * Bytes.SIZEOF_LONG); // snapshotId, timeOfOldestEdit
+  // @formatter:on

   public final static long DEEP_OVERHEAD = FIXED_OVERHEAD;
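FIXED_OVERHEAD itself is unchanged here; the markers only shield the hand-aligned arithmetic from the formatter. For readers wondering what such a constant encodes, a hedged, self-contained sketch that mimics the ClassSize-style bookkeeping with plain literals; the sizes below are illustrative stand-ins, not HBase's actual ClassSize values:

public class HeapOverheadExample {
  // Illustrative stand-ins for ClassSize.OBJECT, ClassSize.REFERENCE and Bytes.SIZEOF_LONG.
  private static final long OBJECT_HEADER = 16L;
  private static final long REFERENCE = 8L;
  private static final long SIZEOF_LONG = 8L;

  // @formatter:off
  // object header, five reference fields, plus two longs (snapshotId, timeOfOldestEdit)
  public static final long FIXED_OVERHEAD = OBJECT_HEADER
      + (5 * REFERENCE)
      + (2 * SIZEOF_LONG);
  // @formatter:on

  public static void main(String[] args) {
    System.out.println("approximate fixed overhead in bytes: " + FIXED_OVERHEAD);
  }
}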
@@ -2088,8 +2088,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       throw new ServiceException(ie);
     }
     // We are assigning meta, wait a little for regionserver to finish initialization.
-    int timeout = regionServer.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
-      HConstants.DEFAULT_HBASE_RPC_TIMEOUT) >> 2; // Quarter of RPC timeout
+    // Default to quarter of RPC timeout
+    int timeout = regionServer.getConfiguration()
+      .getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT) >> 2;
     long endTime = EnvironmentEdgeManager.currentTime() + timeout;
     synchronized (regionServer.online) {
       try {
@@ -200,10 +200,10 @@ public class ReplicationSource implements ReplicationSourceInterface {
     this.waitOnEndpointSeconds =
       this.conf.getInt(WAIT_ON_ENDPOINT_SECONDS, DEFAULT_WAIT_ON_ENDPOINT_SECONDS);
     decorateConf();
-    this.sleepForRetries =
-      this.conf.getLong("replication.source.sleepforretries", 1000); // 1 second
-    this.maxRetriesMultiplier =
-      this.conf.getInt("replication.source.maxretriesmultiplier", 300); // 5 minutes @ 1 sec per
+    // 1 second
+    this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000);
+    // 5 minutes @ 1 sec per
+    this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300);
     this.queueSizePerGroup = this.conf.getInt("hbase.regionserver.maxlogs", 32);
     this.logQueue = new ReplicationSourceLogQueue(conf, metrics, this);
     this.queueStorage = queueStorage;
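The replication hunks here and below only move the trailing comments above the assignments so each statement fits on one line; the keys and defaults are untouched. A minimal sketch of the pattern using Hadoop's Configuration (it assumes hadoop-common on the classpath; the key names are the ones visible in the hunk, the wrapper class is made up):

import org.apache.hadoop.conf.Configuration;

public class ReplicationRetryConfig {
  private final long sleepForRetries;
  private final int maxRetriesMultiplier;

  ReplicationRetryConfig(Configuration conf) {
    // 1 second
    this.sleepForRetries = conf.getLong("replication.source.sleepforretries", 1000);
    // 5 minutes @ 1 sec per
    this.maxRetriesMultiplier = conf.getInt("replication.source.maxretriesmultiplier", 300);
  }

  public static void main(String[] args) {
    ReplicationRetryConfig c = new ReplicationRetryConfig(new Configuration(false));
    System.out.println(c.sleepForRetries + " ms, x" + c.maxRetriesMultiplier);
  }
}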
@@ -81,12 +81,13 @@ public class ReplicationSourceShipper extends Thread {
     this.walGroupId = walGroupId;
     this.logQueue = logQueue;
     this.source = source;
-    this.sleepForRetries =
-      this.conf.getLong("replication.source.sleepforretries", 1000); // 1 second
-    this.maxRetriesMultiplier =
-      this.conf.getInt("replication.source.maxretriesmultiplier", 300); // 5 minutes @ 1 sec per
-    this.getEntriesTimeout =
-      this.conf.getInt("replication.source.getEntries.timeout", DEFAULT_TIMEOUT); // 20 seconds
+    // 1 second
+    this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000);
+    // 5 minutes @ 1 sec per
+    this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300);
+    // 20 seconds
+    this.getEntriesTimeout =
+      this.conf.getInt("replication.source.getEntries.timeout", DEFAULT_TIMEOUT);
     this.shipEditsTimeout = this.conf.getInt(HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT,
       HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT_DFAULT);
   }
@@ -106,10 +106,10 @@ class ReplicationSourceWALReader extends Thread {
     int batchCount = conf.getInt("replication.source.nb.batches", 1);
     this.totalBufferUsed = source.getSourceManager().getTotalBufferUsed();
     this.totalBufferQuota = source.getSourceManager().getTotalBufferLimit();
-    this.sleepForRetries =
-      this.conf.getLong("replication.source.sleepforretries", 1000); // 1 second
-    this.maxRetriesMultiplier =
-      this.conf.getInt("replication.source.maxretriesmultiplier", 300); // 5 minutes @ 1 sec per
+    // 1 second
+    this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000);
+    // 5 minutes @ 1 sec per
+    this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300);
     this.eofAutoRecovery = conf.getBoolean("replication.source.eof.autorecovery", false);
     this.entryBatchQueue = new LinkedBlockingQueue<>(batchCount);
     this.walGroupId = walGroupId;
@@ -72,7 +72,7 @@ public class TokenUtil {

   /**
-   * See {@link ClientTokenUtil#toToken(org.apache.hadoop.security.token.Token)}.
+   * See {@link ClientTokenUtil#toToken(Token)}.
    * @deprecated External users should not use this method. Please post on
    *   the HBase dev mailing list if you need this method. Internal
    *   HBase code should use {@link ClientTokenUtil} instead.
@@ -83,8 +83,7 @@ public class TokenUtil {
   }

   /**
-   * See {@link ClientTokenUtil#obtainToken(org.apache.hadoop.hbase.client.Connection,
-   * org.apache.hadoop.hbase.security.User)}.
+   * See {@link ClientTokenUtil#obtainToken(Connection, User)}.
    * @deprecated External users should not use this method. Please post on
    *   the HBase dev mailing list if you need this method. Internal
    *   HBase code should use {@link ClientTokenUtil} instead.
@@ -96,8 +95,7 @@ public class TokenUtil {
   }

   /**
-   * See {@link ClientTokenUtil#obtainAndCacheToken(org.apache.hadoop.hbase.client.Connection,
-   * org.apache.hadoop.hbase.security.User)}.
+   * See {@link ClientTokenUtil#obtainAndCacheToken(Connection, User)}.
    */
   public static void obtainAndCacheToken(final Connection conn,
     User user)
@@ -106,7 +104,7 @@ public class TokenUtil {
   }

   /**
-   * See {@link ClientTokenUtil#toToken(org.apache.hadoop.security.token.Token)}.
+   * See {@link ClientTokenUtil#toToken(org.apache.hadoop.hbase.shaded.protobuf.generated.AuthenticationProtos.Token)}.
    * @deprecated External users should not use this method. Please post on
    *   the HBase dev mailing list if you need this method. Internal
    *   HBase code should use {@link ClientTokenUtil} instead.
@@ -167,8 +167,8 @@ public class EntryBuffers {
       internify(entry);
       entries.add(entry);
       // TODO linkedlist entry
-      long incrHeap = entry.getEdit().heapSize() +
-        ClassSize.align(2 * ClassSize.REFERENCE); // WALKey pointers
+      // entry size plus WALKey pointers
+      long incrHeap = entry.getEdit().heapSize() + ClassSize.align(2 * ClassSize.REFERENCE);
       heapInBuffer += incrHeap;
       return incrHeap;
     }
@@ -2924,9 +2924,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
     if (jobConf == null) {
       jobConf = mrCluster.createJobConf();
     }
-    jobConf.set("mapreduce.cluster.local.dir",
-      conf.get("mapreduce.cluster.local.dir")); //Hadoop MiniMR overwrites this while it should not
+    // Hadoop MiniMR overwrites this while it should not
+    jobConf.set("mapreduce.cluster.local.dir", conf.get("mapreduce.cluster.local.dir"));
     LOG.info("Mini mapreduce cluster started");

     // In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance and updates settings.
@@ -223,8 +223,9 @@ public class TestRegionObserverScannerOpenHook {
     Get get = new Get(ROW);
     Result r = region.get(get);
     assertNull(
-      "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
-        + r, r.listCells());
+      "Got an unexpected number of rows - "
+        + "no data should be returned with the NoDataFromScan coprocessor. Found: " + r,
+      r.listCells());
     HBaseTestingUtility.closeRegionAndWAL(region);
   }
@@ -250,8 +251,9 @@ public class TestRegionObserverScannerOpenHook {
     Get get = new Get(ROW);
     Result r = region.get(get);
     assertNull(
-      "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
-        + r, r.listCells());
+      "Got an unexpected number of rows - "
+        + "no data should be returned with the NoDataFromScan coprocessor. Found: " + r,
+      r.listCells());
     HBaseTestingUtility.closeRegionAndWAL(region);
   }
@@ -269,7 +271,9 @@ public class TestRegionObserverScannerOpenHook {
     }

     public CountDownLatch getCompactionStateChangeLatch() {
-      if (compactionStateChangeLatch == null) compactionStateChangeLatch = new CountDownLatch(1);
+      if (compactionStateChangeLatch == null) {
+        compactionStateChangeLatch = new CountDownLatch(1);
+      }
       return compactionStateChangeLatch;
     }
@@ -277,7 +281,9 @@ public class TestRegionObserverScannerOpenHook {
     public boolean compact(CompactionContext compaction, HStore store,
       ThroughputController throughputController) throws IOException {
       boolean ret = super.compact(compaction, store, throughputController);
-      if (ret) compactionStateChangeLatch.countDown();
+      if (ret) {
+        compactionStateChangeLatch.countDown();
+      }
       return ret;
     }
@@ -341,14 +347,16 @@ public class TestRegionObserverScannerOpenHook {
     Get get = new Get(ROW);
     Result r = table.get(get);
     assertNull(
-      "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
-        + r, r.listCells());
+      "Got an unexpected number of rows - "
+        + "no data should be returned with the NoDataFromScan coprocessor. Found: " + r,
+      r.listCells());

     get = new Get(Bytes.toBytes("anotherrow"));
     r = table.get(get);
     assertNull(
-      "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor Found: "
-        + r, r.listCells());
+      "Got an unexpected number of rows - "
+        + "no data should be returned with the NoDataFromScan coprocessor Found: " + r,
+      r.listCells());

     table.close();
     UTIL.shutdownMiniCluster();
@@ -193,8 +193,9 @@ public class TestFavoredNodeAssignmentHelper {
     // the primary can be assigned but the secondary/tertiary would be null
     Map<String,Integer> rackToServerCount = new HashMap<>();
     rackToServerCount.put("rack1", 1);
-    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
-      primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount);
+    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper,
+      List<RegionInfo>> primaryRSMapAndHelper =
+        secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount);
     FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
     Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
     List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
@@ -212,9 +212,10 @@ public class TestFixedFileTrailer {
       String msg = ex.getMessage();
       String cleanMsg = msg.replaceAll(
         "^(java(\\.[a-zA-Z]+)+:\\s+)?|\\s+\\(.*\\)\\s*$", "");
-      assertEquals("Actual exception message is \"" + msg + "\".\n" +
-        "Cleaned-up message", // will be followed by " expected: ..."
-        "Invalid HFile version: " + invalidVersion, cleanMsg);
+      // will be followed by " expected: ..."
+      assertEquals("Actual exception message is \"" + msg + "\".\nCleaned-up message",
+        "Invalid HFile version: " + invalidVersion,
+        cleanMsg);
       LOG.info("Got an expected exception: " + msg);
     }
   }
@@ -309,8 +309,8 @@ public class TestHFileBlock {

   @Test
   public void testGzipCompression() throws IOException {
-    final String correctTestBlockStr =
-      "DATABLK*\\x00\\x00\\x00>\\x00\\x00\\x0F\\xA0\\xFF\\xFF\\xFF\\xFF"
+    // @formatter:off
+    String correctTestBlockStr = "DATABLK*\\x00\\x00\\x00>\\x00\\x00\\x0F\\xA0\\xFF\\xFF\\xFF\\xFF"
       + "\\xFF\\xFF\\xFF\\xFF"
       + "\\x0" + ChecksumType.getDefaultChecksumType().getCode()
       + "\\x00\\x00@\\x00\\x00\\x00\\x00["
@@ -329,8 +329,9 @@ public class TestHFileBlock {
       + "\\xD6\\xE8\\xA3\\xB9K\\x84`\\x96Q\\xD3\\xA8\\xDB\\xA8e\\xD4c"
       + "\\xD46\\xEA5\\xEA3\\xEA7\\xE7\\x00LI\\x5Cs\\xA0\\x0F\\x00\\x00"
       + "\\x00\\x00\\x00\\x00"; // 4 byte checksum (ignored)
-    final int correctGzipBlockLength = 95;
-    final String testBlockStr = createTestBlockStr(GZ, correctGzipBlockLength, false);
+    // @formatter:on
+    int correctGzipBlockLength = 95;
+    String testBlockStr = createTestBlockStr(GZ, correctGzipBlockLength, false);
     // We ignore the block checksum because createTestBlockStr can change the
     // gzip header after the block is produced
     assertEquals(correctTestBlockStr.substring(0, correctGzipBlockLength - 4),
@@ -75,34 +75,24 @@ public class TestMetricsRegion {
     // test region with replica id > 0
     mr = new MetricsRegion(new MetricsRegionWrapperStub(1), new Configuration());
     agg = mr.getSource().getAggregateSource();
-    HELPER.assertGauge(
-      "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_storeCount",
-      101, agg);
-    HELPER.assertGauge(
-      "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_storeFileCount",
-      102, agg);
-    HELPER.assertGauge(
-      "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_memstoreSize",
-      103, agg);
-    HELPER.assertCounter(
-      "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_" +
-        "filteredReadRequestCount",
-      107, agg);
-    HELPER.assertCounter(
-      "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_replicaid",
-      1, agg);
-    HELPER.assertCounter(
-      "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_compactionsQueuedCount",
-      4, agg);
-    HELPER.assertCounter(
-      "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_flushesQueuedCount",
-      6, agg);
-    HELPER.assertCounter(
-      "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_maxCompactionQueueSize",
-      4, agg);
-    HELPER.assertCounter(
-      "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_maxFlushQueueSize",
-      6, agg);
+    HELPER.assertGauge("namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001"
+      + "_metric_storeCount", 101, agg);
+    HELPER.assertGauge("namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001"
+      + "_metric_storeFileCount", 102, agg);
+    HELPER.assertGauge("namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001"
+      + "_metric_memstoreSize", 103, agg);
+    HELPER.assertCounter("namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001"
+      + "_metric_filteredReadRequestCount", 107, agg);
+    HELPER.assertCounter("namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001"
+      + "_metric_replicaid", 1, agg);
+    HELPER.assertCounter("namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001"
+      + "_metric_compactionsQueuedCount", 4, agg);
+    HELPER.assertCounter("namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001"
+      + "_metric_flushesQueuedCount", 6, agg);
+    HELPER.assertCounter("namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001"
+      + "_metric_maxCompactionQueueSize", 4, agg);
+    HELPER.assertCounter("namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001"
+      + "_metric_maxFlushQueueSize", 6, agg);
     mr.close();
   }
 }
@@ -393,17 +393,30 @@ public class TestRegionMergeTransactionOnCluster {
     for (Pair<RegionInfo, ServerName> p : currentRegionToServers) {
       currentRegions.add(p.getFirst());
     }
-    assertTrue(initialRegions.contains(mergedRegions.getFirst())); //this is the first region
-    assertTrue(initialRegions.contains(RegionReplicaUtil
-      .getRegionInfoForReplica(mergedRegions.getFirst(), 1))); //this is the replica of the first region
-    assertTrue(initialRegions.contains(mergedRegions.getSecond())); //this is the second region
-    assertTrue(initialRegions.contains(RegionReplicaUtil
-      .getRegionInfoForReplica(mergedRegions.getSecond(), 1))); //this is the replica of the second region
-    assertTrue(!initialRegions.contains(currentRegions.get(0))); //this is the new region
-    assertTrue(!initialRegions.contains(RegionReplicaUtil.getRegionInfoForReplica(currentRegions.get(0), 1))); //replica of the new region
-    assertTrue(currentRegions.contains(RegionReplicaUtil.getRegionInfoForReplica(currentRegions.get(0), 1))); //replica of the new region
-    assertTrue(!currentRegions.contains(RegionReplicaUtil.getRegionInfoForReplica(mergedRegions.getFirst(), 1))); //replica of the merged region
-    assertTrue(!currentRegions.contains(RegionReplicaUtil.getRegionInfoForReplica(mergedRegions.getSecond(), 1))); //replica of the merged region
+    // this is the first region
+    assertTrue(initialRegions.contains(mergedRegions.getFirst()));
+    // this is the replica of the first region
+    assertTrue(initialRegions
+      .contains(RegionReplicaUtil.getRegionInfoForReplica(mergedRegions.getFirst(), 1)));
+    // this is the second region
+    assertTrue(initialRegions.contains(mergedRegions.getSecond()));
+    // this is the replica of the second region
+    assertTrue(initialRegions
+      .contains(RegionReplicaUtil.getRegionInfoForReplica(mergedRegions.getSecond(), 1)));
+    // this is the new region
+    assertTrue(!initialRegions.contains(currentRegions.get(0)));
+    // replica of the new region
+    assertTrue(!initialRegions
+      .contains(RegionReplicaUtil.getRegionInfoForReplica(currentRegions.get(0), 1)));
+    // replica of the new region
+    assertTrue(currentRegions
+      .contains(RegionReplicaUtil.getRegionInfoForReplica(currentRegions.get(0), 1)));
+    // replica of the merged region
+    assertTrue(!currentRegions
+      .contains(RegionReplicaUtil.getRegionInfoForReplica(mergedRegions.getFirst(), 1)));
+    // replica of the merged region
+    assertTrue(!currentRegions
+      .contains(RegionReplicaUtil.getRegionInfoForReplica(mergedRegions.getSecond(), 1)));
     table.close();
   } finally {
     TEST_UTIL.deleteTable(tableName);
@@ -709,21 +709,29 @@ public class TestStoreScanner {
   @Test
   public void testWildCardScannerUnderDeletes() throws IOException {
     KeyValue[] kvs = new KeyValue[] {
-      create("R1", "cf", "a", 2, KeyValue.Type.Put, "dont-care"), // inc
+      // inc
+      create("R1", "cf", "a", 2, KeyValue.Type.Put, "dont-care"),
       // orphaned delete column.
       create("R1", "cf", "a", 1, KeyValue.Type.DeleteColumn, "dont-care"),
       // column b
-      create("R1", "cf", "b", 2, KeyValue.Type.Put, "dont-care"), // inc
-      create("R1", "cf", "b", 1, KeyValue.Type.Put, "dont-care"), // inc
+      // inc
+      create("R1", "cf", "b", 2, KeyValue.Type.Put, "dont-care"),
+      // inc
+      create("R1", "cf", "b", 1, KeyValue.Type.Put, "dont-care"),
       // column c
       create("R1", "cf", "c", 10, KeyValue.Type.Delete, "dont-care"),
-      create("R1", "cf", "c", 10, KeyValue.Type.Put, "dont-care"), // no
-      create("R1", "cf", "c", 9, KeyValue.Type.Put, "dont-care"), // inc
+      // no
+      create("R1", "cf", "c", 10, KeyValue.Type.Put, "dont-care"),
+      // inc
+      create("R1", "cf", "c", 9, KeyValue.Type.Put, "dont-care"),
       // column d
-      create("R1", "cf", "d", 11, KeyValue.Type.Put, "dont-care"), // inc
+      // inc
+      create("R1", "cf", "d", 11, KeyValue.Type.Put, "dont-care"),
       create("R1", "cf", "d", 10, KeyValue.Type.DeleteColumn, "dont-care"),
-      create("R1", "cf", "d", 9, KeyValue.Type.Put, "dont-care"), // no
-      create("R1", "cf", "d", 8, KeyValue.Type.Put, "dont-care"), // no
+      // no
+      create("R1", "cf", "d", 9, KeyValue.Type.Put, "dont-care"),
+      // no
+      create("R1", "cf", "d", 8, KeyValue.Type.Put, "dont-care"),
     };
     List<KeyValueScanner> scanners = scanFixture(kvs);
@@ -926,6 +934,7 @@ public class TestStoreScanner {
         return now;
       }
     });
+    // @formatter:off
     KeyValue[] kvs = new KeyValue[]{
       /*0*/ new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null,
         now - 100, KeyValue.Type.DeleteFamily), // live
@@ -960,6 +969,7 @@ public class TestStoreScanner {
       /*15*/ create("R1", "cf", "d", now - 100,
         KeyValue.Type.Delete, "not-expired delete"), // live
     };
+    // @formatter:on
     List<KeyValueScanner> scanners = scanFixture(kvs);
     ScanInfo scanInfo = new ScanInfo(CONF, Bytes.toBytes("cf"),
       0 /* minVersions */,
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hbase.regionserver.throttle;

+import static org.apache.hadoop.hbase.regionserver.throttle.PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND;
+import static org.apache.hadoop.hbase.regionserver.throttle.PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND;
+import static org.apache.hadoop.hbase.regionserver.throttle.PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -111,14 +114,8 @@ public class TestCompactionWithThroughputController {
     conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
     conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 200);
     conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
-    conf.setLong(
-      PressureAwareCompactionThroughputController
-        .HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND,
-      throughputLimit);
-    conf.setLong(
-      PressureAwareCompactionThroughputController
-        .HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND,
-      throughputLimit);
+    conf.setLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND, throughputLimit);
+    conf.setLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND, throughputLimit);
     conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
       PressureAwareCompactionThroughputController.class.getName());
     TEST_UTIL.startMiniCluster(1);
@@ -183,21 +180,13 @@ public class TestCompactionWithThroughputController {
   public void testThroughputTuning() throws Exception {
     Configuration conf = TEST_UTIL.getConfiguration();
     conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
-    conf.setLong(
-      PressureAwareCompactionThroughputController
-        .HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND,
-      20L * 1024 * 1024);
-    conf.setLong(
-      PressureAwareCompactionThroughputController
-        .HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND,
-      10L * 1024 * 1024);
+    conf.setLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND, 20L * 1024 * 1024);
+    conf.setLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND, 10L * 1024 * 1024);
     conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 4);
     conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 6);
     conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
       PressureAwareCompactionThroughputController.class.getName());
-    conf.setInt(
-      PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD,
-      1000);
+    conf.setInt(HBASE_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD, 1000);
     TEST_UTIL.startMiniCluster(1);
     Connection conn = ConnectionFactory.createConnection(conf);
     try {
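These two hunks rely on the static imports added at the top of the test: once the long PressureAwareCompactionThroughputController constants are imported statically, each conf.setLong(...) call fits on a single line. The mechanics of a static import, shown on a JDK constant so the sketch stands alone:

import static java.lang.Math.PI;

public class StaticImportExample {
  public static void main(String[] args) {
    // With the static import the constant is referenced without its owning class,
    // which is what keeps the conf.setLong(...) calls above under the line limit.
    double circumference = 2 * PI * 10.0;
    System.out.println(circumference);
  }
}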
@@ -38,7 +38,7 @@ import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;

-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationState;

 /**
  * This UT is used to make sure that we will not accidentally change the way to generate online
@@ -95,8 +95,8 @@ public class TestRefreshPeerWhileRegionServerRestarts extends TestReplicationBas
     UTIL1.waitFor(30000, () -> {
       for (Procedure<?> proc : UTIL1.getMiniHBaseCluster().getMaster().getProcedures()) {
         if (proc instanceof DisablePeerProcedure) {
-          return ((DisablePeerProcedure) proc).getCurrentStateId() ==
-            MasterProcedureProtos.PeerModificationState.POST_PEER_MODIFICATION_VALUE;
+          return ((DisablePeerProcedure) proc)
+            .getCurrentStateId() == PeerModificationState.POST_PEER_MODIFICATION_VALUE;
         }
       }
       return false;
@@ -288,11 +288,15 @@ public class TestZKMulti {
     // test that, even with operations that fail, the ones that would pass will pass
     // with runSequentialOnMultiFailure
     ops = new LinkedList<>();
-    ops.add(ZKUtilOp.setData(path1, Bytes.add(Bytes.toBytes(path1), Bytes.toBytes(path1)))); // pass
-    ops.add(ZKUtilOp.deleteNodeFailSilent(path2)); // pass
-    ops.add(ZKUtilOp.deleteNodeFailSilent(path3)); // fail -- node doesn't exist
-    ops.add(ZKUtilOp.createAndFailSilent(path4,
-      Bytes.add(Bytes.toBytes(path4), Bytes.toBytes(path4)))); // pass
+    // pass
+    ops.add(ZKUtilOp.setData(path1, Bytes.add(Bytes.toBytes(path1), Bytes.toBytes(path1))));
+    // pass
+    ops.add(ZKUtilOp.deleteNodeFailSilent(path2));
+    // fail -- node doesn't exist
+    ops.add(ZKUtilOp.deleteNodeFailSilent(path3));
+    // pass
+    ops.add(
+      ZKUtilOp.createAndFailSilent(path4, Bytes.add(Bytes.toBytes(path4), Bytes.toBytes(path4))));
     ZKUtil.multiOrSequential(zkw, ops, true);
     assertTrue(Bytes.equals(ZKUtil.getData(zkw, path1),
       Bytes.add(Bytes.toBytes(path1), Bytes.toBytes(path1))));
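The TestZKMulti change is, again, only comment placement. For readers unfamiliar with the API being exercised, a hedged sketch of how the calls visible in this hunk fit together; it reuses the factory methods shown above, assumes the package locations used by recent HBase 2.x (hbase-zookeeper module), and needs a live ZooKeeper ensemble behind the supplied watcher, so it is a compile-time sketch rather than a standalone demo:

import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.zookeeper.KeeperException;

public class ZkMultiSketch {
  /** Mirrors the batch built in the hunk above; outcomes per op follow the test's own comments. */
  static void runBatch(ZKWatcher zkw, String path1, String path2, String path3, String path4)
    throws KeeperException {
    List<ZKUtilOp> ops = new LinkedList<>();
    // pass
    ops.add(ZKUtilOp.setData(path1, Bytes.add(Bytes.toBytes(path1), Bytes.toBytes(path1))));
    // pass
    ops.add(ZKUtilOp.deleteNodeFailSilent(path2));
    // fail -- node doesn't exist
    ops.add(ZKUtilOp.deleteNodeFailSilent(path3));
    // pass
    ops.add(
      ZKUtilOp.createAndFailSilent(path4, Bytes.add(Bytes.toBytes(path4), Bytes.toBytes(path4))));
    // 'true' asks ZKUtil to rerun the operations sequentially if the single multi
    // transaction fails, so the operations that can succeed still take effect.
    ZKUtil.multiOrSequential(zkw, ops, true);
  }
}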