HBASE-11373 hbase-protocol compile failed for name conflict of RegionTransition

Jimmy Xiang 2014-06-18 08:38:05 -07:00
parent 6834c929cc
commit 175f133dbc
12 changed files with 379 additions and 378 deletions

View File

@@ -78,7 +78,7 @@ message GetLastFlushedSequenceIdResponse {
   required uint64 last_flushed_sequence_id = 1;
 }
-message RegionTransition {
+message RegionStateTransition {
   required TransitionCode transition_code = 1;
   /** Mutliple regions are involved during merging/splitting */
@@ -107,14 +107,14 @@ message RegionTransition {
   }
 }
-message ReportRegionTransitionRequest {
+message ReportRegionStateTransitionRequest {
   /** This region server's server name */
   required ServerName server = 1;
-  repeated RegionTransition transition = 2;
+  repeated RegionStateTransition transition = 2;
 }
-message ReportRegionTransitionResponse {
+message ReportRegionStateTransitionResponse {
   /** Error message if failed to update the region state */
   optional string error_message = 1;
 }
@@ -146,6 +146,6 @@ service RegionServerStatusService {
    * transition. If the request fails, the transition should
    * be aborted.
    */
-  rpc ReportRegionTransition(ReportRegionTransitionRequest)
-    returns(ReportRegionTransitionResponse);
+  rpc ReportRegionStateTransition(ReportRegionStateTransitionRequest)
+    returns(ReportRegionStateTransitionResponse);
 }
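The proto rename above flows straight through to the Java classes that protoc generates. As a rough illustration of the renamed API (a minimal sketch only: the wrapper class and method here are hypothetical, but every generated call mirrors the HRegionServer change later in this commit):

    import com.google.protobuf.ServiceException;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
    import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
    import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
    import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
    import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;

    /** Hypothetical helper, not part of this commit; shows the renamed generated API only. */
    public class RegionStateTransitionReportSketch {
      /** Reports a transition to the master; returns true if the master accepted it. */
      public static boolean report(RegionServerStatusService.BlockingInterface master,
          ServerName serverName, TransitionCode code, long openSeqNum, HRegionInfo... hris)
          throws ServiceException {
        ReportRegionStateTransitionRequest.Builder builder =
            ReportRegionStateTransitionRequest.newBuilder();
        builder.setServer(ProtobufUtil.toServerName(serverName));
        RegionStateTransition.Builder transition = builder.addTransitionBuilder();
        transition.setTransitionCode(code);
        if (code == TransitionCode.OPENED && openSeqNum >= 0) {
          transition.setOpenSeqNum(openSeqNum);
        }
        for (HRegionInfo hri : hris) {
          transition.addRegionInfo(HRegionInfo.convert(hri));
        }
        // The blocking stub method is renamed along with the rpc declaration above.
        ReportRegionStateTransitionResponse response =
            master.reportRegionStateTransition(null, builder.build());
        return !response.hasErrorMessage();
      }
    }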

View File

@@ -60,9 +60,9 @@ import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
 import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.coordination.RegionMergeCoordination;
 import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
 import org.apache.hadoop.hbase.coordination.OpenRegionCoordination;
+import org.apache.hadoop.hbase.coordination.RegionMergeCoordination;
 import org.apache.hadoop.hbase.coordination.SplitTransactionCoordination.SplitTransactionDetails;
 import org.apache.hadoop.hbase.coordination.ZkOpenRegionCoordination;
 import org.apache.hadoop.hbase.coordination.ZkRegionMergeCoordination;
@@ -80,8 +80,8 @@ import org.apache.hadoop.hbase.master.handler.ClosedRegionHandler;
 import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
 import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
 import org.apache.hadoop.hbase.master.handler.OpenedRegionHandler;
-import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos;
-import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionTransition.TransitionCode;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException;
 import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
@@ -3808,7 +3808,7 @@ public class AssignmentManager extends ZooKeeperListener {
    * region open/close
    */
   protected String onRegionTransition(final ServerName serverName,
-      final RegionServerStatusProtos.RegionTransition transition) {
+      final RegionStateTransition transition) {
     TransitionCode code = transition.getTransitionCode();
     HRegionInfo hri = HRegionInfo.convert(transition.getRegionInfo(0));
     RegionState current = regionStates.getRegionState(hri);

View File

@@ -142,11 +142,11 @@ import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.Regio
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
-import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionTransition;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse;
-import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionTransitionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionTransitionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
@@ -1244,11 +1244,11 @@ public class MasterRpcServices extends RSRpcServices
   }
   @Override
-  public ReportRegionTransitionResponse reportRegionTransition(RpcController controller,
-      ReportRegionTransitionRequest req) throws ServiceException {
+  public ReportRegionStateTransitionResponse reportRegionStateTransition(RpcController c,
+      ReportRegionStateTransitionRequest req) throws ServiceException {
     try {
       master.checkServiceStarted();
-      RegionTransition rt = req.getTransition(0);
+      RegionStateTransition rt = req.getTransition(0);
       TableName tableName = ProtobufUtil.toTableName(
         rt.getRegionInfo(0).getTableName());
       if (!TableName.META_TABLE_NAME.equals(tableName)
@@ -1259,8 +1259,8 @@ public class MasterRpcServices extends RSRpcServices
       }
       ServerName sn = ProtobufUtil.toServerName(req.getServer());
       String error = master.assignmentManager.onRegionTransition(sn, rt);
-      ReportRegionTransitionResponse.Builder rrtr =
-        ReportRegionTransitionResponse.newBuilder();
+      ReportRegionStateTransitionResponse.Builder rrtr =
+        ReportRegionStateTransitionResponse.newBuilder();
       if (error != null) {
         rrtr.setErrorMessage(error);
       }

View File

@@ -108,11 +108,11 @@ import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.Regio
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
-import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionTransition;
-import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionTransition.TransitionCode;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
-import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionTransitionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionTransitionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
 import org.apache.hadoop.hbase.regionserver.handler.CloseMetaHandler;
 import org.apache.hadoop.hbase.regionserver.handler.CloseRegionHandler;
@@ -1683,7 +1683,7 @@ public class HRegionServer extends HasThread implements
       MetaEditor.updateRegionLocation(ct, r.getRegionInfo(),
         this.serverName, openSeqNum);
     }
-    if (!useZKForAssignment && !reportRegionTransition(
+    if (!useZKForAssignment && !reportRegionStateTransition(
         TransitionCode.OPENED, openSeqNum, r.getRegionInfo())) {
       throw new IOException("Failed to report opened region to master: "
         + r.getRegionNameAsString());
@@ -1693,16 +1693,17 @@ public class HRegionServer extends HasThread implements
   }
   @Override
-  public boolean reportRegionTransition(TransitionCode code, HRegionInfo... hris) {
-    return reportRegionTransition(code, HConstants.NO_SEQNUM, hris);
+  public boolean reportRegionStateTransition(TransitionCode code, HRegionInfo... hris) {
+    return reportRegionStateTransition(code, HConstants.NO_SEQNUM, hris);
   }
   @Override
-  public boolean reportRegionTransition(
+  public boolean reportRegionStateTransition(
       TransitionCode code, long openSeqNum, HRegionInfo... hris) {
-    ReportRegionTransitionRequest.Builder builder = ReportRegionTransitionRequest.newBuilder();
+    ReportRegionStateTransitionRequest.Builder builder =
+      ReportRegionStateTransitionRequest.newBuilder();
     builder.setServer(ProtobufUtil.toServerName(serverName));
-    RegionTransition.Builder transition = builder.addTransitionBuilder();
+    RegionStateTransition.Builder transition = builder.addTransitionBuilder();
     transition.setTransitionCode(code);
     if (code == TransitionCode.OPENED && openSeqNum >= 0) {
       transition.setOpenSeqNum(openSeqNum);
@@ -1710,7 +1711,7 @@ public class HRegionServer extends HasThread implements
     for (HRegionInfo hri: hris) {
       transition.addRegionInfo(HRegionInfo.convert(hri));
     }
-    ReportRegionTransitionRequest request = builder.build();
+    ReportRegionStateTransitionRequest request = builder.build();
     while (keepLooping()) {
       RegionServerStatusService.BlockingInterface rss = rssStub;
       try {
@@ -1718,8 +1719,8 @@ public class HRegionServer extends HasThread implements
         createRegionServerStatusStub();
         continue;
       }
-      ReportRegionTransitionResponse response =
-        rss.reportRegionTransition(null, request);
+      ReportRegionStateTransitionResponse response =
+        rss.reportRegionStateTransition(null, request);
       if (response.hasErrorMessage()) {
         LOG.info("Failed to transition " + hris[0]
           + " to " + code + ": " + response.getErrorMessage());

View File

@@ -40,8 +40,8 @@ import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
-import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionTransition.TransitionCode;
 import org.apache.hadoop.hbase.coordination.RegionMergeCoordination.RegionMergeDetails;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.regionserver.SplitTransaction.LoggingProgressable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ConfigUtil;
@@ -332,7 +332,7 @@ public class RegionMergeTransaction {
         region_a.getRegionInfo(), region_b.getRegionInfo(), server.getServerName(), metaEntries);
       }
     } else if (services != null && !useCoordinationForAssignment) {
-      if (!services.reportRegionTransition(TransitionCode.MERGE_PONR,
+      if (!services.reportRegionStateTransition(TransitionCode.MERGE_PONR,
           mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) {
         // Passed PONR, let SSH clean it up
         throw new IOException("Failed to notify master that merge passed PONR: "
@@ -368,7 +368,6 @@ public class RegionMergeTransaction {
     addLocation(putOfMerged, serverName, 1);
   }
-  @SuppressWarnings("deprecation")
   public Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
     p.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, Bytes
       .toBytes(sn.getHostAndPort()));
@@ -398,7 +397,7 @@ public class RegionMergeTransaction {
         + this.mergedRegionInfo.getRegionNameAsString(), e);
       }
     } else if (services != null && !useCoordinationForAssignment) {
-      if (!services.reportRegionTransition(TransitionCode.READY_TO_MERGE,
+      if (!services.reportRegionStateTransition(TransitionCode.READY_TO_MERGE,
           mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) {
         throw new IOException("Failed to get ok from master to merge "
           + region_a.getRegionInfo().getRegionNameAsString() + " and "
@@ -581,7 +580,7 @@ public class RegionMergeTransaction {
     try {
       if (useCoordinationForAssignment) {
         services.postOpenDeployTasks(merged, server.getCatalogTracker());
-      } else if (!services.reportRegionTransition(TransitionCode.MERGED,
+      } else if (!services.reportRegionStateTransition(TransitionCode.MERGED,
           mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) {
         throw new IOException("Failed to report merged region to master: "
           + mergedRegionInfo.getShortNameToLog());
@@ -656,7 +655,7 @@ public class RegionMergeTransaction {
       ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
         .getRegionMergeCoordination().clean(this.mergedRegionInfo);
     } else if (services != null && !useCoordinationForAssignment
-        && !services.reportRegionTransition(TransitionCode.MERGE_REVERTED,
+        && !services.reportRegionStateTransition(TransitionCode.MERGE_REVERTED,
           mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) {
       return false;
     }

View File

@@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.catalog.CatalogTracker;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
 import org.apache.hadoop.hbase.master.TableLockManager;
-import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionTransition.TransitionCode;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.zookeeper.KeeperException;
@@ -83,12 +83,12 @@ public interface RegionServerServices
   /**
    * Notify master that a handler requests to change a region state
    */
-  boolean reportRegionTransition(TransitionCode code, long openSeqNum, HRegionInfo... hris);
+  boolean reportRegionStateTransition(TransitionCode code, long openSeqNum, HRegionInfo... hris);
   /**
    * Notify master that a handler requests to change a region state
    */
-  boolean reportRegionTransition(TransitionCode code, HRegionInfo... hris);
+  boolean reportRegionStateTransition(TransitionCode code, HRegionInfo... hris);
   /**
    * Returns a reference to the region server's RPC server

View File

@@ -45,7 +45,7 @@ import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
 import org.apache.hadoop.hbase.coordination.SplitTransactionCoordination;
-import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionTransition.TransitionCode;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.ConfigUtil;
@@ -286,7 +286,7 @@ public class SplitTransaction {
         .getSecond().getRegionInfo(), server.getServerName(), metaEntries);
       }
     } else if (services != null && !useZKForAssignment) {
-      if (!services.reportRegionTransition(TransitionCode.SPLIT_PONR,
+      if (!services.reportRegionStateTransition(TransitionCode.SPLIT_PONR,
           parent.getRegionInfo(), hri_a, hri_b)) {
         // Passed PONR, let SSH clean it up
         throw new IOException("Failed to notify master that split passed PONR: "
@@ -309,7 +309,7 @@ public class SplitTransaction {
         .getSplitTransactionCoordination().startSplitTransaction(parent, server.getServerName(),
           hri_a, hri_b);
     } else if (services != null && !useZKForAssignment) {
-      if (!services.reportRegionTransition(TransitionCode.READY_TO_SPLIT,
+      if (!services.reportRegionStateTransition(TransitionCode.READY_TO_SPLIT,
          parent.getRegionInfo(), hri_a, hri_b)) {
        throw new IOException("Failed to get ok from master to split "
          + parent.getRegionNameAsString());
@@ -419,7 +419,7 @@ public class SplitTransaction {
       if (useZKForAssignment) {
         // add 2nd daughter first (see HBASE-4335)
         services.postOpenDeployTasks(b, server.getCatalogTracker());
-      } else if (!services.reportRegionTransition(TransitionCode.SPLIT,
+      } else if (!services.reportRegionStateTransition(TransitionCode.SPLIT,
           parent.getRegionInfo(), hri_a, hri_b)) {
         throw new IOException("Failed to report split region to master: "
           + parent.getRegionInfo().getShortNameToLog());
@@ -711,7 +711,7 @@ public class SplitTransaction {
       ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
         .getSplitTransactionCoordination().clean(this.parent.getRegionInfo());
     } else if (services != null && !useZKForAssignment
-        && !services.reportRegionTransition(TransitionCode.SPLIT_REVERTED,
+        && !services.reportRegionStateTransition(TransitionCode.SPLIT_REVERTED,
           parent.getRegionInfo(), hri_a, hri_b)) {
       return false;
     }

View File

@@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.coordination.CloseRegionCoordination;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
-import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionTransition.TransitionCode;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.util.ConfigUtil;
@@ -154,7 +154,7 @@ public class CloseRegionHandler extends EventHandler {
       this.rsServices.removeFromOnlineRegions(region, destination);
       if (!useZKForAssignment) {
-        rsServices.reportRegionTransition(TransitionCode.CLOSED, regionInfo);
+        rsServices.reportRegionStateTransition(TransitionCode.CLOSED, regionInfo);
       } else {
         closeRegionCoordination.setClosedState(region, this.server.getServerName(),
           closeRegionDetails);

View File

@@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.coordination.OpenRegionCoordination;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
-import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionTransition.TransitionCode;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionServerAccounting;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
@@ -208,7 +208,7 @@ public class OpenRegionHandler extends EventHandler {
         }
       } finally {
         if (!useZKForAssignment) {
-          rsServices.reportRegionTransition(TransitionCode.FAILED_OPEN, regionInfo);
+          rsServices.reportRegionStateTransition(TransitionCode.FAILED_OPEN, regionInfo);
         } else {
           // Even if cleanupFailed open fails we need to do this transition
           // See HBASE-7698
@@ -216,7 +216,7 @@ public class OpenRegionHandler extends EventHandler {
         }
       }
     } else if (!useZKForAssignment) {
-      rsServices.reportRegionTransition(TransitionCode.FAILED_OPEN, regionInfo);
+      rsServices.reportRegionStateTransition(TransitionCode.FAILED_OPEN, regionInfo);
     } else {
       // If still transition to OPENING is not done, we need to transition znode
       // to FAILED_OPEN

View File

@@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
 import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.master.TableLockManager.NullTableLockManager;
-import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionTransition.TransitionCode;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.regionserver.CompactionRequestor;
 import org.apache.hadoop.hbase.regionserver.FlushRequester;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -228,13 +228,13 @@ class MockRegionServerServices implements RegionServerServices {
   }
   @Override
-  public boolean reportRegionTransition(TransitionCode code, long openSeqNum,
+  public boolean reportRegionStateTransition(TransitionCode code, long openSeqNum,
       HRegionInfo... hris) {
     return false;
   }
   @Override
-  public boolean reportRegionTransition(TransitionCode code,
+  public boolean reportRegionStateTransition(TransitionCode code,
       HRegionInfo... hris) {
     return false;
   }

View File

@@ -84,7 +84,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
-import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionTransition.TransitionCode;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.regionserver.CompactionRequestor;
 import org.apache.hadoop.hbase.regionserver.FlushRequester;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -562,12 +562,12 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
   }
   @Override
-  public boolean reportRegionTransition(TransitionCode code, HRegionInfo... hris) {
+  public boolean reportRegionStateTransition(TransitionCode code, HRegionInfo... hris) {
     return false;
   }
   @Override
-  public boolean reportRegionTransition(TransitionCode code, long openSeqNum,
+  public boolean reportRegionStateTransition(TransitionCode code, long openSeqNum,
       HRegionInfo... hris) {
     return false;
   }