+ * required .RegionInfo regionInfo = 1;
+ */
+ boolean hasRegionInfo();
+ /**
+ * required .RegionInfo regionInfo = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo();
+ /**
+ * required .RegionInfo regionInfo = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder();
+ }
+ /**
+ * Protobuf type {@code WarmupRegionRequest}
+ */
+ public static final class WarmupRegionRequest extends
+ com.google.protobuf.GeneratedMessage
+ implements WarmupRegionRequestOrBuilder {
+ // Use WarmupRegionRequest.newBuilder() to construct.
+ private WarmupRegionRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private WarmupRegionRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final WarmupRegionRequest defaultInstance;
+ public static WarmupRegionRequest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public WarmupRegionRequest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private WarmupRegionRequest(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = regionInfo_.toBuilder();
+ }
+ regionInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(regionInfo_);
+ regionInfo_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
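(For orientation: the tag values in the parse loop above follow the protobuf wire
format, tag = (field_number << 3) | wire_type. The snippet below is an editorial
illustration, not part of the patch.)

  // field 1 (regionInfo) with wire type 2 (length-delimited message):
  static final int REGION_INFO_TAG = (1 << 3) | 2;  // == 10, matching "case 10" above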
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_WarmupRegionRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_WarmupRegionRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<WarmupRegionRequest> PARSER =
+ new com.google.protobuf.AbstractParser<WarmupRegionRequest>() {
+ public WarmupRegionRequest parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new WarmupRegionRequest(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<WarmupRegionRequest> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .RegionInfo regionInfo = 1;
+ public static final int REGIONINFO_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo regionInfo_;
+ /**
+ * required .RegionInfo regionInfo = 1;
+ */
+ public boolean hasRegionInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .RegionInfo regionInfo = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() {
+ return regionInfo_;
+ }
+ /**
+ * required .RegionInfo regionInfo = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() {
+ return regionInfo_;
+ }
+
+ private void initFields() {
+ regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasRegionInfo()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getRegionInfo().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, regionInfo_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, regionInfo_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest) obj;
+
+ boolean result = true;
+ result = result && (hasRegionInfo() == other.hasRegionInfo());
+ if (hasRegionInfo()) {
+ result = result && getRegionInfo()
+ .equals(other.getRegionInfo());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasRegionInfo()) {
+ hash = (37 * hash) + REGIONINFO_FIELD_NUMBER;
+ hash = (53 * hash) + getRegionInfo().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code WarmupRegionRequest}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequestOrBuilder {
+
+ private int bitField0_;
+ // required .RegionInfo regionInfo = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_;
+ /**
+ * required .RegionInfo regionInfo = 1;
+ */
+ public boolean hasRegionInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .RegionInfo regionInfo = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() {
+ if (regionInfoBuilder_ == null) {
+ return regionInfo_;
+ } else {
+ return regionInfoBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .RegionInfo regionInfo = 1;
+ */
+ public Builder setRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ regionInfo_ = value;
+ onChanged();
+ } else {
+ regionInfoBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .RegionInfo regionInfo = 1;
+ */
+ public Builder setRegionInfo(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (regionInfoBuilder_ == null) {
+ regionInfo_ = builderForValue.build();
+ onChanged();
+ } else {
+ regionInfoBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .RegionInfo regionInfo = 1;
+ */
+ public Builder mergeRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ regionInfo_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) {
+ regionInfo_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(regionInfo_).mergeFrom(value).buildPartial();
+ } else {
+ regionInfo_ = value;
+ }
+ onChanged();
+ } else {
+ regionInfoBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .RegionInfo regionInfo = 1;
+ */
+ public Builder clearRegionInfo() {
+ if (regionInfoBuilder_ == null) {
+ regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
+ onChanged();
+ } else {
+ regionInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * required .RegionInfo regionInfo = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getRegionInfoFieldBuilder().getBuilder();
+ }
+ /**
+ * required .RegionInfo regionInfo = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() {
+ if (regionInfoBuilder_ != null) {
+ return regionInfoBuilder_.getMessageOrBuilder();
+ } else {
+ return regionInfo_;
+ }
+ }
+ /**
+ * required .RegionInfo regionInfo = 1;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoFieldBuilder() {
+ if (regionInfoBuilder_ == null) {
+ regionInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>(
+ regionInfo_,
+ getParentForChildren(),
+ isClean());
+ regionInfo_ = null;
+ }
+ return regionInfoBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:WarmupRegionRequest)
+ }
+
+ static {
+ defaultInstance = new WarmupRegionRequest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:WarmupRegionRequest)
+ }
+
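For orientation, a minimal sketch (not part of the patch) of building the new
request and round-tripping it through the generated API above; the table name
"demo" and the helper method are hypothetical:

  import org.apache.hadoop.hbase.HRegionInfo;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest;

  // Editorial sketch: build, serialize, and parse back the new message.
  static WarmupRegionRequest buildAndCheck() throws Exception {
    HRegionInfo hri = new HRegionInfo(TableName.valueOf("demo"));
    WarmupRegionRequest request = WarmupRegionRequest.newBuilder()
        .setRegionInfo(HRegionInfo.convert(hri))  // HRegionInfo -> proto RegionInfo
        .build();
    WarmupRegionRequest parsed = WarmupRegionRequest.parseFrom(request.toByteArray());
    assert parsed.hasRegionInfo() && parsed.isInitialized();
    return parsed;
  }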
+ public interface WarmupRegionResponseOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+ }
+ /**
+ * Protobuf type {@code WarmupRegionResponse}
+ */
+ public static final class WarmupRegionResponse extends
+ com.google.protobuf.GeneratedMessage
+ implements WarmupRegionResponseOrBuilder {
+ // Use WarmupRegionResponse.newBuilder() to construct.
+ private WarmupRegionResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private WarmupRegionResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final WarmupRegionResponse defaultInstance;
+ public static WarmupRegionResponse getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public WarmupRegionResponse getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private WarmupRegionResponse(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_WarmupRegionResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_WarmupRegionResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<WarmupRegionResponse> PARSER =
+ new com.google.protobuf.AbstractParser<WarmupRegionResponse>() {
+ public WarmupRegionResponse parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new WarmupRegionResponse(input, extensionRegistry);
+ }
+ };
+
+ /**
+ * rpc WarmupRegion(.WarmupRegionRequest) returns (.WarmupRegionResponse);
+ */
+ public abstract void warmupRegion(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse> done);
+
 /**
 * rpc CloseRegion(.CloseRegionRequest) returns (.CloseRegionResponse);
 */
@@ -20952,6 +21859,14 @@ public final class AdminProtos {
impl.openRegion(controller, request, done);
}
+ @java.lang.Override
+ public void warmupRegion(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse> done) {
+ impl.warmupRegion(controller, request, done);
+ }
+
+ /**
+ * rpc WarmupRegion(.WarmupRegionRequest) returns (.WarmupRegionResponse);
+ */
+ public abstract void warmupRegion(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse> done);
+
 /**
 * rpc CloseRegion(.CloseRegionRequest) returns (.CloseRegionResponse);
 */
@@ -21373,61 +22302,66 @@ public final class AdminProtos {
done));
return;
case 4:
+ this.warmupRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest)request,
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse>specializeCallback(
+ done));
+ return;

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+ /**
+ * Sends a WARMUP RPC to the specified server to warm up the specified region.
+ *
+ * A region server could reject the warmup request because it either does not
+ * have the specified region or the region is being split.
+ * @param server server to warmup a region
+ * @param region region to warmup
+ */
+ public void sendRegionWarmup(ServerName server,
+ HRegionInfo region) {
+ if (server == null) return;
+ try {
+ AdminService.BlockingInterface admin = getRsAdmin(server);
+ ProtobufUtil.warmupRegion(admin, region);
+ } catch (IOException e) {
+ LOG.error("Received exception in RPC for warmup server:" +
+ server + "region: " + region +
+ "exception: " + e);
+ }
+ }
+
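A hedged sketch of the intended call site: before moving a region, the master
can warm the destination server so the region does not come online with cold
caches. The regionPlan and assignment wiring here is illustrative, not taken
from this patch:

  // Illustrative only: warm the destination, then carry out the move.
  ServerName destination = regionPlan.getDestination();  // hypothetical RegionPlan
  serverManager.sendRegionWarmup(destination, hri);
  // ... then unassign hri so it is reopened on the (now warm) destination ...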
/**
* Contacts a region server and waits up to timeout ms
* to close the region. This bypasses the active hmaster.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 704947daf32..92332756519 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -849,11 +849,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
status.setStatus("Writing region info on filesystem");
fs.checkRegionInfoOnFilesystem();
-
-
// Initialize all the HStores
status.setStatus("Initializing all the Stores");
- long maxSeqId = initializeRegionStores(reporter, status);
+ long maxSeqId = initializeRegionStores(reporter, status, false);
this.lastReplayedOpenRegionSeqId = maxSeqId;
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
@@ -916,8 +914,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
return nextSeqid;
}
- private long initializeRegionStores(final CancelableProgressable reporter, MonitoredTask status)
+ private long initializeRegionStores(final CancelableProgressable reporter, MonitoredTask status,
+ boolean warmupOnly)
throws IOException {
+
// Load in all the HStores.
long maxSeqId = -1;
@@ -979,7 +979,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
}
}
}
- if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
+ if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this) && !warmupOnly) {
// Recover any edits if available.
maxSeqId = Math.max(maxSeqId, replayRecoveredEditsIfAny(
this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
@@ -989,6 +989,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
return maxSeqId;
}
+ private void initializeWarmup(final CancelableProgressable reporter) throws IOException {
+ MonitoredTask status = TaskMonitor.get().createStatus("Initializing region " + this);
+
+ // Initialize all the HStores
+ status.setStatus("Warming up all the Stores");
+ initializeRegionStores(reporter, status, true);
+ }
+
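The static HRegion.warmupHRegion entry point that RSRpcServices and the test
below call is not shown in this hunk; a plausible sketch of how it would tie
into initializeWarmup, with the directory and filesystem plumbing assumed
rather than confirmed:

  // Sketch only; the argument plumbing is an assumption, not the committed code.
  public static void warmupHRegion(HRegionInfo info, HTableDescriptor htd,
      WAL wal, Configuration conf, RegionServerServices rsServices,
      CancelableProgressable reporter) throws IOException {
    Path tableDir = FSUtils.getTableDir(FSUtils.getRootDir(conf), info.getTable());
    FileSystem fs = FileSystem.get(conf);
    HRegion region = newHRegion(tableDir, wal, fs, conf, info, htd, rsServices);
    region.initializeWarmup(reporter);  // open stores; skip replaying recovered edits
  }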
private void writeRegionOpenMarker(WAL wal, long openSeqId) throws IOException {
+ Map<byte[], List<Path>> storeFiles = new TreeMap<byte[], List<Path>>(Bytes.BYTES_COMPARATOR);

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+ /**
+ * Warmup a region on this server.
+ *
+ * This method should only be called by the Master. It synchronously opens the
+ * region's stores and brings its most important pages into cache.
+ *
+ * @param controller the RPC controller
+ * @param request the request
+ * @throws ServiceException
+ */
+ public WarmupRegionResponse warmupRegion(final RpcController controller,
+ final WarmupRegionRequest request) throws ServiceException {
+
+ RegionInfo regionInfo = request.getRegionInfo();
+ final HRegionInfo region = HRegionInfo.convert(regionInfo);
+ HTableDescriptor htd;
+ WarmupRegionResponse response = WarmupRegionResponse.getDefaultInstance();
+
+ try {
+ String encodedName = region.getEncodedName();
+ byte[] encodedNameBytes = region.getEncodedNameAsBytes();
+ final HRegion onlineRegion = regionServer.getFromOnlineRegions(encodedName);
+
+ if (onlineRegion != null) {
+ LOG.info("Region already online. Skipping warming up " + region);
+ return response;
+ }
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Warming up Region " + region.getRegionNameAsString());
+ }
+
+ htd = regionServer.tableDescriptors.get(region.getTable());
+
+ if (regionServer.getRegionsInTransitionInRS().containsKey(encodedNameBytes)) {
+ LOG.info("Region is in transition. Skipping warmup " + region);
+ return response;
+ }
+
+ HRegion.warmupHRegion(region, htd, regionServer.getWAL(region),
+ regionServer.getConfiguration(), regionServer, null);
+
+ } catch (IOException ie) {
+ LOG.error("Failed warming up region " + region.getRegionNameAsString(), ie);
+ throw new ServiceException(ie);
+ }
+
+ return response;
+ }
+
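From the master side the new endpoint is reached through the generated blocking
stub; a minimal hedged example (the admin handle would come from
getRsAdmin(server) as in ServerManager above, and hri is illustrative):

  // Illustrative RPC invocation against the new endpoint.
  WarmupRegionRequest req = WarmupRegionRequest.newBuilder()
      .setRegionInfo(HRegionInfo.convert(hri))
      .build();
  WarmupRegionResponse resp = admin.warmupRegion(null, req);  // null controller: assumption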
/**
* Replay the given changes when distributedLogReplay WAL edits from a failed RS. The guarantee is
* that the given mutations will be durable on the receiving RS if this method returns without any
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index 7b5c4942b87..00f8509f01b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -65,6 +65,8 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsReques
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest;
@@ -452,6 +454,12 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
return null;
}
+ @Override
+ public WarmupRegionResponse warmupRegion(RpcController controller,
+ WarmupRegionRequest request) throws ServiceException {
+ //TODO Auto-generated method stub
+ return null;
+ }
@Override
public CloseRegionResponse closeRegion(RpcController controller,
CloseRegionRequest request) throws ServiceException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java
new file mode 100644
index 00000000000..ab08ef02335
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java
@@ -0,0 +1,163 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static org.apache.hadoop.hbase.regionserver.HRegion.warmupHRegion;
+import java.io.IOException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.experimental.categories.Category;
+import org.junit.BeforeClass;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.After;
+import org.junit.Test;
+
+/**
+ * Tests warming up a region before it is moved.
+ * Sets up the HBase mini cluster once at start; each test creates a table,
+ * loads it, and exercises region warmup against it.
+ */
+@Category(LargeTests.class)
+@SuppressWarnings ("deprecation")
+public class TestWarmupRegion {
+ final Log LOG = LogFactory.getLog(getClass());
+ protected TableName TABLENAME = TableName.valueOf("testPurgeFutureDeletes");
+ protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private static byte [] ROW = Bytes.toBytes("testRow");
+ private static byte [] FAMILY = Bytes.toBytes("testFamily");
+ private static byte [] QUALIFIER = Bytes.toBytes("testQualifier");
+ private static byte [] VALUE = Bytes.toBytes("testValue");
+ private static byte[] COLUMN = Bytes.toBytes("column");
+ private static int numRows = 10000;
+ protected static int SLAVES = 3;
+ private static MiniHBaseCluster myCluster;
+ private static Table table;
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ Configuration conf = TEST_UTIL.getConfiguration();
+ TEST_UTIL.startMiniCluster(SLAVES);
+ }
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @Before
+ public void setUp() throws Exception {
+ table = TEST_UTIL.createTable(TABLENAME, FAMILY);
+
+ // write rows with a future timestamp
+ for (int i = 0; i < numRows; i++) {
+ long ts = System.currentTimeMillis() * 2;
+ Put put = new Put(ROW, ts);
+ put.add(FAMILY, COLUMN, VALUE);
+ table.put(put);
+ }
+
+ // flush and major compact so the data lands in store files
+ TEST_UTIL.getHBaseAdmin().flush(TABLENAME);
+ TEST_UTIL.getHBaseAdmin().majorCompact(TABLENAME);
+
+ // waiting for the major compaction to complete
+ TEST_UTIL.waitFor(6000, new Waiter.Predicate<IOException>() {
+ @Override
+ public boolean evaluate() throws IOException {
+ return TEST_UTIL.getHBaseAdmin().getCompactionState(TABLENAME) ==
+ AdminProtos.GetRegionInfoResponse.CompactionState.NONE;
+ }
+ });
+ }